diff --git a/.gitignore b/.gitignore index d1810a5a83f..b4ec8795057 100644 --- a/.gitignore +++ b/.gitignore @@ -33,6 +33,7 @@ dependency-reduced-pom.xml # testing stuff **/.local* .vagrant/ +/logs/ # osx stuff .DS_Store diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index da81436b8ad..5885bf9def7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,7 +88,8 @@ Contributing to the Elasticsearch codebase **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch) Make sure you have [Gradle](http://gradle.org) installed, as -Elasticsearch uses it as its build system. +Elasticsearch uses it as its build system. Gradle must be version 2.13 _exactly_ in +order to build successfully. Eclipse users can automatically configure their IDE: `gradle eclipse` then `File: Import: Existing Projects into Workspace`. Select the diff --git a/README.textile b/README.textile index 69d3fd54767..dc3a263cd7c 100644 --- a/README.textile +++ b/README.textile @@ -123,7 +123,7 @@ There are many more options to perform search, after all, it's a search product h3. Multi Tenant - Indices and Types -Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data. +Man, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data. Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@. @@ -200,7 +200,7 @@ We have just covered a very small portion of what Elasticsearch is all about. Fo h3. Building from Source -Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have a modern version of Gradle installed - 2.13 should do. +Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have version 2.13 of Gradle installed. In order to create a distribution, simply run the @gradle assemble@ command in the cloned directory. diff --git a/TESTING.asciidoc b/TESTING.asciidoc index dd6c093047a..6b14e7f4f7f 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -16,22 +16,6 @@ following: gradle assemble ----------------------------- -== Other test options - -To disable and enable network transport, set the `tests.es.node.mode` system property. - -Use network transport: - ------------------------------------- --Dtests.es.node.mode=network ------------------------------------- - -Use local transport (default since 1.3): - -------------------------------------- --Dtests.es.node.mode=local -------------------------------------- - === Running Elasticsearch from a checkout In order to run Elasticsearch from source without building a package, you can @@ -41,6 +25,12 @@ run it using Gradle: gradle run ------------------------------------- +or to attach a remote debugger, run it as: + +------------------------------------- +gradle run --debug-jvm +------------------------------------- + === Test case filtering. 
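Test filtering is controlled by the system properties described in the list below. A hypothetical invocation combining them, assuming the standard `gradle test` task and with illustrative class/method globs, might look like:

-------------------------------------
gradle test -Dtests.class="*.RestClientSingleHostIntegTests" -Dtests.method="*path*"
-------------------------------------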
- `tests.class` is a class-filtering shell-like glob pattern, @@ -363,7 +353,6 @@ These are the linux flavors the Vagrantfile currently supports: * ubuntu-1204 aka precise * ubuntu-1404 aka trusty -* ubuntu-1504 aka vivid * ubuntu-1604 aka xenial * debian-8 aka jessie, the current debian stable distribution * centos-6 diff --git a/Vagrantfile b/Vagrantfile index 761ef20628d..96151724d13 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -30,13 +30,6 @@ Vagrant.configure(2) do |config| config.vm.box = "elastic/ubuntu-14.04-x86_64" ubuntu_common config end - config.vm.define "ubuntu-1504" do |config| - config.vm.box = "elastic/ubuntu-15.04-x86_64" - ubuntu_common config, extra: <<-SHELL - # Install Jayatana so we can work around it being present. - [ -f /usr/share/java/jayatanaag.jar ] || install jayatana - SHELL - end config.vm.define "ubuntu-1604" do |config| config.vm.box = "elastic/ubuntu-16.04-x86_64" ubuntu_common config, extra: <<-SHELL @@ -156,6 +149,7 @@ def dnf_common(config) update_command: "dnf check-update", update_tracking_file: "/var/cache/dnf/last_update", install_command: "dnf install -y", + install_command_retries: 5, java_package: "java-1.8.0-openjdk-devel") if Vagrant.has_plugin?("vagrant-cachier") # Autodetect doesn't work.... @@ -205,6 +199,7 @@ def provision(config, update_command: 'required', update_tracking_file: 'required', install_command: 'required', + install_command_retries: 0, java_package: 'required', extra: '') # Vagrant run ruby 2.0.0 which doesn't have required named parameters.... @@ -215,9 +210,27 @@ def provision(config, config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL set -e set -o pipefail + + # Retry install command up to $2 times, if failed + retry_installcommand() { + n=0 + while true; do + #{install_command} $1 && break + let n=n+1 + if [ $n -ge $2 ]; then + echo "==> Exhausted retries to install $1" + return 1 + fi + echo "==> Retrying installing $1, attempt $((n+1))" + # Add a small delay to increase chance of metalink providing updated list of mirrors + sleep 5 + done + } + installed() { command -v $1 2>&1 >/dev/null } + install() { # Only apt-get update if we haven't in the last day if [ ! -f #{update_tracking_file} ] || [ "x$(find #{update_tracking_file} -mtime +0)" == "x#{update_tracking_file}" ]; then @@ -226,8 +239,14 @@ def provision(config, touch #{update_tracking_file} fi echo "==> Installing $1" - #{install_command} $1 + if [ #{install_command_retries} -eq 0 ] + then + #{install_command} $1 + else + retry_installcommand $1 #{install_command_retries} + fi } + ensure() { installed $1 || install $1 } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index 39cfdb6582d..173a293f3e5 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -58,57 +58,57 @@ public class AllocationBenchmark { // support to constrain the combinations of benchmark parameters and we do not want to rely on OptionsBuilder as each benchmark would // need its own main method and we cannot execute more than one class with a main method per JAR. 
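The hunk below also swaps the comma for a pipe as the field separator inside each parameter string, presumably because JMH's own command line (for example its -p option) treats commas as separators between parameter values, so embedded commas would be ambiguous there. Since String.split takes a regex and '|' is the regex alternation operator, the parsing side has to escape it. A minimal sketch of that subtlety, illustrative only and not part of the patch:

    // Why the split in setUp() below must escape the pipe:
    String params = "10|1|0|1";
    String[] wrong = params.split("|");   // "|" is regex alternation; this splits into single characters
    String[] right = params.split("\\|"); // escaped literal pipe: ["10", "1", "0", "1"]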
@Param({ - // indices, shards, replicas, nodes - " 10, 1, 0, 1", - " 10, 3, 0, 1", - " 10, 10, 0, 1", - " 100, 1, 0, 1", - " 100, 3, 0, 1", - " 100, 10, 0, 1", + // indices| shards| replicas| nodes + " 10| 1| 0| 1", + " 10| 3| 0| 1", + " 10| 10| 0| 1", + " 100| 1| 0| 1", + " 100| 3| 0| 1", + " 100| 10| 0| 1", - " 10, 1, 0, 10", - " 10, 3, 0, 10", - " 10, 10, 0, 10", - " 100, 1, 0, 10", - " 100, 3, 0, 10", - " 100, 10, 0, 10", + " 10| 1| 0| 10", + " 10| 3| 0| 10", + " 10| 10| 0| 10", + " 100| 1| 0| 10", + " 100| 3| 0| 10", + " 100| 10| 0| 10", - " 10, 1, 1, 10", - " 10, 3, 1, 10", - " 10, 10, 1, 10", - " 100, 1, 1, 10", - " 100, 3, 1, 10", - " 100, 10, 1, 10", + " 10| 1| 1| 10", + " 10| 3| 1| 10", + " 10| 10| 1| 10", + " 100| 1| 1| 10", + " 100| 3| 1| 10", + " 100| 10| 1| 10", - " 10, 1, 2, 10", - " 10, 3, 2, 10", - " 10, 10, 2, 10", - " 100, 1, 2, 10", - " 100, 3, 2, 10", - " 100, 10, 2, 10", + " 10| 1| 2| 10", + " 10| 3| 2| 10", + " 10| 10| 2| 10", + " 100| 1| 2| 10", + " 100| 3| 2| 10", + " 100| 10| 2| 10", - " 10, 1, 0, 50", - " 10, 3, 0, 50", - " 10, 10, 0, 50", - " 100, 1, 0, 50", - " 100, 3, 0, 50", - " 100, 10, 0, 50", + " 10| 1| 0| 50", + " 10| 3| 0| 50", + " 10| 10| 0| 50", + " 100| 1| 0| 50", + " 100| 3| 0| 50", + " 100| 10| 0| 50", - " 10, 1, 1, 50", - " 10, 3, 1, 50", - " 10, 10, 1, 50", - " 100, 1, 1, 50", - " 100, 3, 1, 50", - " 100, 10, 1, 50", + " 10| 1| 1| 50", + " 10| 3| 1| 50", + " 10| 10| 1| 50", + " 100| 1| 1| 50", + " 100| 3| 1| 50", + " 100| 10| 1| 50", - " 10, 1, 2, 50", - " 10, 3, 2, 50", - " 10, 10, 2, 50", - " 100, 1, 2, 50", - " 100, 3, 2, 50", - " 100, 10, 2, 50" + " 10| 1| 2| 50", + " 10| 3| 2| 50", + " 10| 10| 2| 50", + " 100| 1| 2| 50", + " 100| 3| 2| 50", + " 100| 10| 2| 50" }) - public String indicesShardsReplicasNodes = "10,1,0,1"; + public String indicesShardsReplicasNodes = "10|1|0|1"; public int numTags = 2; @@ -117,7 +117,7 @@ public class AllocationBenchmark { @Setup public void setUp() throws Exception { - final String[] params = indicesShardsReplicasNodes.split(","); + final String[] params = indicesShardsReplicasNodes.split("\\|"); int numIndices = toInt(params[0]); int numShards = toInt(params[1]); diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java index 860137cf559..4d8f7cfeaac 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -31,15 +31,18 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.GatewayAllocator; import java.lang.reflect.InvocationTargetException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; public final class Allocators { private static class NoopGatewayAllocator extends GatewayAllocator { @@ -91,8 +94,11 @@ public final class Allocators { } + private 
static final AtomicInteger portGenerator = new AtomicInteger(); + public static DiscoveryNode newNode(String nodeId, Map attributes) { - return new DiscoveryNode("", nodeId, LocalTransportAddress.buildUnique(), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER, + return new DiscoveryNode("", nodeId, new TransportAddress(TransportAddress.META_ADDRESS, + portGenerator.incrementAndGet()), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA), Version.CURRENT); } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index ebe4a2bdccc..65402290e01 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -28,6 +28,7 @@ import org.gradle.api.Task import org.gradle.api.XmlProvider import org.gradle.api.artifacts.Configuration import org.gradle.api.artifacts.ModuleDependency +import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler @@ -294,12 +295,15 @@ class BuildPlugin implements Plugin { * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms. * * */ private static Closure fixupDependencies(Project project) { - // TODO: remove this when enforcing gradle 2.14+, it now properly handles exclusions + // TODO: revisit this when upgrading to Gradle 2.14+, see Javadoc comment above return { XmlProvider xml -> // first find if we have dependencies at all, and grab the node NodeList depsNodes = xml.asNode().get('dependencies') @@ -334,10 +338,19 @@ class BuildPlugin implements Plugin { continue } - // we now know we have something to exclude, so add a wildcard exclusion element - Node exclusion = depNode.appendNode('exclusions').appendNode('exclusion') - exclusion.appendNode('groupId', '*') - exclusion.appendNode('artifactId', '*') + // we now know we have something to exclude, so add exclusions for all artifacts except the main one + Node exclusions = depNode.appendNode('exclusions') + for (ResolvedArtifact artifact : artifacts) { + ModuleVersionIdentifier moduleVersionIdentifier = artifact.moduleVersion.id; + String depGroupId = moduleVersionIdentifier.group + String depArtifactId = moduleVersionIdentifier.name + // add exclusions for all artifacts except the main one + if (depGroupId != groupId || depArtifactId != artifactId) { + Node exclusion = exclusions.appendNode('exclusion') + exclusion.appendNode('groupId', depGroupId) + exclusion.appendNode('artifactId', depArtifactId) + } + } } } } @@ -393,7 +406,7 @@ class BuildPlugin implements Plugin { } options.encoding = 'UTF-8' - //options.incremental = true + options.incremental = true if (project.javaVersion == JavaVersion.VERSION_1_9) { // hack until gradle supports java 9's new "--release" arg diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 11bdbd19525..a46a7bda374 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -38,7 +38,7 @@ public class DocsTestPlugin extends RestTestPlugin { * the last released version for docs. 
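     * Similarly, the `\{lucene_version\}` substitution below strips any
     * `-snapshot-<rev>` qualifier via replaceAll, so generated docs always
     * reference a release-style Lucene version.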
*/ '\\{version\\}': VersionProperties.elasticsearch.replace('-SNAPSHOT', ''), - '\\{lucene_version\\}' : VersionProperties.lucene, + '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index 41f74b45be1..518b4da439c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -39,6 +39,7 @@ public class SnippetsTask extends DefaultTask { private static final String SKIP = /skip:([^\]]+)/ private static final String SETUP = /setup:([^ \]]+)/ private static final String WARNING = /warning:(.+)/ + private static final String CAT = /(_cat)/ private static final String TEST_SYNTAX = /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/ @@ -221,8 +222,17 @@ public class SnippetsTask extends DefaultTask { substitutions = [] } String loc = "$file:$lineNumber" - parse(loc, matcher.group(2), /$SUBSTITUTION ?/) { - substitutions.add([it.group(1), it.group(2)]) + parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$CAT) ?/) { + if (it.group(1) != null) { + // TESTRESPONSE[s/adsf/jkl/] + substitutions.add([it.group(1), it.group(2)]) + } else if (it.group(3) != null) { + // TESTRESPONSE[_cat] + substitutions.add(['^', '/']) + substitutions.add(['\n$', '\\\\s*/']) + substitutions.add(['( +)', '$1\\\\s+']) + substitutions.add(['\n', '\\\\s*\n ']) + } } } return diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index c93ecb4094b..d5295519ad2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -49,8 +49,7 @@ public class PluginBuildPlugin extends BuildPlugin { project.afterEvaluate { boolean isModule = project.path.startsWith(':modules:') String name = project.pluginProperties.extension.name - project.jar.baseName = name - project.bundlePlugin.baseName = name + project.archivesBaseName = name if (project.pluginProperties.extension.hasClientJar) { // for plugins which work with the transport client, we copy the jar @@ -232,6 +231,7 @@ public class PluginBuildPlugin extends BuildPlugin { * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to * maven local work, since we publish to maven central externally. 
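     * (Note: the artifactId is set explicitly on the publication below because the
     * maven-publish plugin would otherwise default it to the Gradle project name
     * rather than the plugin name configured through archivesBaseName.)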
*/ zipReal(MavenPublication) { + artifactId = project.pluginProperties.extension.name pom.withXml { XmlProvider xml -> Node root = xml.asNode() root.appendNode('name', project.pluginProperties.extension.name) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 076a564f84a..018f9fde2f2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -143,6 +143,10 @@ public class ThirdPartyAuditTask extends AntTask { if (m.matches()) { missingClasses.add(m.group(1).replace('.', '/') + ".class"); } + + // Reset the priority of the event to DEBUG, so it doesn't + // pollute the build output + event.setMessage(event.getMessage(), Project.MSG_DEBUG); } else if (event.getPriority() == Project.MSG_ERR) { Matcher m = VIOLATION_PATTERN.matcher(event.getMessage()); if (m.matches()) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 48183a07721..07306dd14ea 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -62,6 +62,15 @@ class ClusterConfiguration { @Input boolean debug = false + /** + * if true each node will be configured with discovery.zen.minimum_master_nodes set + * to the total number of nodes in the cluster. This will also cause that each node has `0s` state recovery + * timeout which can lead to issues if for instance an existing clusterstate is expected to be recovered + * before any tests start + */ + @Input + boolean useMinimumMasterNodes = true + @Input String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + @@ -95,11 +104,13 @@ class ClusterConfiguration { @Input Closure waitCondition = { NodeInfo node, AntBuilder ant -> File tmpFile = new File(node.cwd, 'wait.success') - ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}") + String waitUrl = "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow" + ant.echo(message: "==> [${new Date()}] checking health: ${waitUrl}", + level: 'info') // checking here for wait_for_nodes to be >= the number of nodes because its possible // this cluster is attempting to connect to nodes created by another task (same cluster name), // so there will be more nodes in that case in the cluster state - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}", + ant.get(src: waitUrl, dest: tmpFile.toString(), ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task retries: 10) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 957e845aa57..2095c892f50 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -73,8 +73,8 @@ class ClusterFormationTasks { } // this is our current version distribution 
configuration we use for all kinds of REST tests etc. String distroConfigName = "${task.name}_elasticsearchDistro" - Configuration distro = project.configurations.create(distroConfigName) - configureDistributionDependency(project, config.distribution, distro, VersionProperties.elasticsearch) + Configuration currentDistro = project.configurations.create(distroConfigName) + configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) if (config.bwcVersion != null && config.numBwcNodes > 0) { // if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version // this version uses the same distribution etc. and only differs in the version we depend on. @@ -85,11 +85,11 @@ class ClusterFormationTasks { } configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion) } - - for (int i = 0; i < config.numNodes; ++i) { + for (int i = 0; i < config.numNodes; i++) { // we start N nodes and out of these N nodes there might be M bwc nodes. // for each of those nodes we might have a different configuratioon String elasticsearchVersion = VersionProperties.elasticsearch + Configuration distro = currentDistro if (i < config.numBwcNodes) { elasticsearchVersion = config.bwcVersion distro = project.configurations.elasticsearchBwcDistro @@ -252,9 +252,17 @@ class ClusterFormationTasks { 'path.repo' : "${node.sharedDir}/repo", 'path.shared_data' : "${node.sharedDir}/", // Define a node attribute so we can test that it exists - 'node.attr.testattr' : 'test', + 'node.attr.testattr' : 'test', 'repositories.url.allowed_urls': 'http://snapshot.test*' ] + // we set min master nodes to the total number of nodes in the cluster and + // basically skip initial state recovery to allow the cluster to form using a realistic master election + // this means all nodes must be up, join the seed node and do a master election. This will also allow new and + // old nodes in the BWC case to become the master + if (node.config.useMinimumMasterNodes && node.config.numNodes > 1) { + esConfig['discovery.zen.minimum_master_nodes'] = node.config.numNodes + esConfig['discovery.initial_state_timeout'] = '0s' // don't wait for state.. just start up quickly + } esConfig['node.max_local_storage_nodes'] = node.config.numNodes esConfig['http.port'] = node.config.httpPort esConfig['transport.tcp.port'] = node.config.transportPort diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index d50937408e7..51bccb4fe75 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -55,7 +55,9 @@ public class RestIntegTestTask extends RandomizedTestingTask { parallelism = '1' include('**/*IT.class') systemProperty('tests.rest.load_packaged', 'false') - systemProperty('tests.rest.cluster', "${-> nodes[0].httpUri()}") + // we pass all nodes to the rest cluster to allow the clients to round-robin between them + // this is more realistic than just talking to a single node + systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") systemProperty('tests.config.dir', "${-> nodes[0].confDir}") // TODO: our "client" qa tests currently use the rest-test plugin. 
instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index f045c95740b..a71dc59dbf9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -16,6 +16,7 @@ public class RunTask extends DefaultTask { clusterConfig.httpPort = 9200 clusterConfig.transportPort = 9300 clusterConfig.daemonize = false + clusterConfig.distribution = 'zip' project.afterEvaluate { ClusterFormationTasks.setup(project, this, clusterConfig) } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index a995c201c47..9c1610741d9 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -38,7 +38,6 @@ - @@ -64,8 +63,6 @@ - - @@ -117,8 +114,6 @@ - - @@ -146,7 +141,6 @@ - @@ -230,7 +224,6 @@ - @@ -245,7 +238,6 @@ - @@ -258,7 +250,6 @@ - @@ -299,7 +290,6 @@ - @@ -310,23 +300,14 @@ - - - - - - - - - @@ -388,7 +369,6 @@ - @@ -419,7 +399,6 @@ - @@ -463,7 +442,6 @@ - @@ -481,8 +459,6 @@ - - @@ -553,8 +529,6 @@ - - @@ -595,7 +569,6 @@ - @@ -723,7 +696,6 @@ - @@ -742,7 +714,6 @@ - @@ -791,7 +762,6 @@ - @@ -819,14 +789,11 @@ - - - @@ -853,7 +820,6 @@ - @@ -881,8 +847,6 @@ - - @@ -981,7 +945,6 @@ - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 625d0b66357..bbf4170591d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,17 +1,17 @@ elasticsearch = 6.0.0-alpha1 -lucene = 6.2.0 +lucene = 6.3.0-snapshot-a66a445 # optional dependencies spatial4j = 0.6 jts = 1.13 jackson = 2.8.1 snakeyaml = 1.15 -log4j = 2.6.2 +log4j = 2.7 slf4j = 1.6.2 jna = 4.2.2 # test dependencies -randomizedrunner = 2.3.2 +randomizedrunner = 2.4.0 junit = 4.11 httpclient = 4.5.2 httpcore = 4.4.5 @@ -20,4 +20,4 @@ commonscodec = 1.10 hamcrest = 1.3 securemock = 1.2 # benchmark dependencies -jmh = 1.14 +jmh = 1.15 diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java index c38234ef302..6d6e5ade827 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java @@ -27,7 +27,7 @@ import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor; import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugin.noop.NoopPlugin; import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction; @@ -51,7 +51,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark { + BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request); + listener.onResponse(bulkRequest); + }; } private static class BulkRestBuilderListener 
extends RestBuilderListener { - private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update", + private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index dcc225c2603..2a5efee1881 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.plugin.noop.action.bulk; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -34,7 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; public class TransportNoopBulkAction extends HandledTransportAction { - private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update", + private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED)); @Inject diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java index 3520876af04..9bcde4ca399 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; @@ -47,8 +46,8 @@ public class RestNoopSearchAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { SearchRequest searchRequest = new SearchRequest(); - client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel)); + return channel -> client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel)); } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java 
b/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java index da7f5c79721..84753e6f75c 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java +++ b/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java @@ -38,25 +38,15 @@ import java.io.IOException; /** * Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. - * Limits the size of responses that can be read to {@link #DEFAULT_BUFFER_LIMIT} by default, configurable value. - * Throws an exception in case the entity is longer than the configured buffer limit. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. */ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer { - //default buffer limit is 10MB - public static final int DEFAULT_BUFFER_LIMIT = 10 * 1024 * 1024; - - private final int bufferLimit; + private final int bufferLimitBytes; private volatile HttpResponse response; private volatile SimpleInputBuffer buf; - /** - * Creates a new instance of this consumer with a buffer limit of {@link #DEFAULT_BUFFER_LIMIT} - */ - public HeapBufferedAsyncResponseConsumer() { - this.bufferLimit = DEFAULT_BUFFER_LIMIT; - } - /** * Creates a new instance of this consumer with the provided buffer limit */ @@ -64,7 +54,14 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons if (bufferLimit <= 0) { throw new IllegalArgumentException("bufferLimit must be greater than 0"); } - this.bufferLimit = bufferLimit; + this.bufferLimitBytes = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimitBytes; } @Override @@ -75,9 +72,9 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons @Override protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException { long len = entity.getContentLength(); - if (len > bufferLimit) { + if (len > bufferLimitBytes) { throw new ContentTooLongException("entity content is too long [" + len + - "] for the configured buffer limit [" + bufferLimit + "]"); + "] for the configured buffer limit [" + bufferLimitBytes + "]"); } if (len < 0) { len = 4096; diff --git a/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java b/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java new file mode 100644 index 00000000000..a5e5b39bed5 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/HttpAsyncResponseConsumerFactory.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpResponse; +import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; + +import static org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.DEFAULT_BUFFER_LIMIT; + +/** + * Factory used to create instances of {@link HttpAsyncResponseConsumer}. Each request retry needs its own instance of the + * consumer object. Users can implement this interface and pass their own instance to the specialized + * performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument. + */ +interface HttpAsyncResponseConsumerFactory { + + /** + * Creates the default type of {@link HttpAsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. + */ + HttpAsyncResponseConsumerFactory DEFAULT = new HeapBufferedResponseConsumerFactory(DEFAULT_BUFFER_LIMIT); + + /** + * Creates the {@link HttpAsyncResponseConsumer}, called once per request attempt. + */ + HttpAsyncResponseConsumer createHttpAsyncResponseConsumer(); + + /** + * Default factory used to create instances of {@link HttpAsyncResponseConsumer}. + * Creates one instance of {@link HeapBufferedAsyncResponseConsumer} for each request attempt, with a configurable + * buffer limit which defaults to 100MB. + */ + class HeapBufferedResponseConsumerFactory implements HttpAsyncResponseConsumerFactory { + + //default buffer limit is 100MB + static final int DEFAULT_BUFFER_LIMIT = 100 * 1024 * 1024; + + private final int bufferLimit; + + public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) { + this.bufferLimit = bufferLimitBytes; + } + + @Override + public HttpAsyncResponseConsumer createHttpAsyncResponseConsumer() { + return new HeapBufferedAsyncResponseConsumer(bufferLimit); + } + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index d2301e1e8e7..89c3309dbbd 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -143,7 +143,7 @@ public class RestClient implements Closeable { * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error */ public Response performRequest(String method, String endpoint, Header... headers) throws IOException { - return performRequest(method, endpoint, Collections.emptyMap(), (HttpEntity)null, headers); + return performRequest(method, endpoint, Collections.emptyMap(), null, headers); } /** @@ -165,9 +165,9 @@ public class RestClient implements Closeable { /** * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, Header...)} - * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer} - * will be used to consume the response body. + * to be returned. 
Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, Header...)} + * which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance, + * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}. * * @param method the http method * @param endpoint the path of the request (without host and port) @@ -181,8 +181,7 @@ public class RestClient implements Closeable { */ public Response performRequest(String method, String endpoint, Map params, HttpEntity entity, Header... headers) throws IOException { - HttpAsyncResponseConsumer responseConsumer = new HeapBufferedAsyncResponseConsumer(); - return performRequest(method, endpoint, params, entity, responseConsumer, headers); + return performRequest(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, headers); } /** @@ -196,8 +195,9 @@ public class RestClient implements Closeable { * @param endpoint the path of the request (without host and port) * @param params the query_string parameters * @param entity the body of the request, null if not applicable - * @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response - * body gets streamed from a non-blocking HTTP connection on the client side. + * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one + * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP + * connection on the client side. * @param headers the optional request headers * @return the response returned by Elasticsearch * @throws IOException in case of a problem or the connection was aborted @@ -205,10 +205,10 @@ public class RestClient implements Closeable { * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error */ public Response performRequest(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumer responseConsumer, + HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, Header... headers) throws IOException { SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis); - performRequestAsync(method, endpoint, params, entity, responseConsumer, listener, headers); + performRequestAsync(method, endpoint, params, entity, httpAsyncResponseConsumerFactory, listener, headers); return listener.get(); } @@ -245,9 +245,9 @@ public class RestClient implements Closeable { /** * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead * the provided {@link ResponseListener} will be notified upon completion or failure. - * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)} - * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer} - * will be used to consume the response body. + * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, ResponseListener, + * Header...)} which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance, + * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}. 
* * @param method the http method * @param endpoint the path of the request (without host and port) @@ -258,8 +258,7 @@ public class RestClient implements Closeable { */ public void performRequestAsync(String method, String endpoint, Map params, HttpEntity entity, ResponseListener responseListener, Header... headers) { - HttpAsyncResponseConsumer responseConsumer = new HeapBufferedAsyncResponseConsumer(); - performRequestAsync(method, endpoint, params, entity, responseConsumer, responseListener, headers); + performRequestAsync(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, responseListener, headers); } /** @@ -274,29 +273,31 @@ public class RestClient implements Closeable { * @param endpoint the path of the request (without host and port) * @param params the query_string parameters * @param entity the body of the request, null if not applicable - * @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response - * body gets streamed from a non-blocking HTTP connection on the client side. + * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one + * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP + * connection on the client side. * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails * @param headers the optional request headers */ public void performRequestAsync(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumer responseConsumer, + HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, ResponseListener responseListener, Header... headers) { URI uri = buildUri(pathPrefix, endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); setHeaders(request, headers); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener); + performRequestAsync(startTime, nextHost().iterator(), request, httpAsyncResponseConsumerFactory, failureTrackingResponseListener); } private void performRequestAsync(final long startTime, final Iterator hosts, final HttpRequestBase request, - final HttpAsyncResponseConsumer responseConsumer, + final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, final FailureTrackingResponseListener listener) { final HttpHost host = hosts.next(); //we stream the request body if the entity allows for it HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request); - client.execute(requestProducer, responseConsumer, new FutureCallback() { + HttpAsyncResponseConsumer asyncResponseConsumer = httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer(); + client.execute(requestProducer, asyncResponseConsumer, new FutureCallback() { @Override public void completed(HttpResponse httpResponse) { try { @@ -346,7 +347,7 @@ public class RestClient implements Closeable { } else { listener.trackFailure(exception); request.reset(); - performRequestAsync(startTime, hosts, request, responseConsumer, listener); + performRequestAsync(startTime, hosts, request, httpAsyncResponseConsumerFactory, listener); } } else { listener.onDefinitiveFailure(exception); @@ -510,6 +511,7 @@ public class RestClient implements Closeable { 
private static URI buildUri(String pathPrefix, String path, Map params) { Objects.requireNonNull(params, "params must not be null"); + Objects.requireNonNull(path, "path must not be null"); try { String fullPath; if (pathPrefix != null) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java b/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java index d30a9e00b53..2488ea4b435 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumerTests.java @@ -32,7 +32,6 @@ import org.apache.http.nio.ContentDecoder; import org.apache.http.nio.IOControl; import org.apache.http.protocol.HttpContext; -import static org.elasticsearch.client.HeapBufferedAsyncResponseConsumer.DEFAULT_BUFFER_LIMIT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; @@ -45,13 +44,14 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { //maximum buffer that this test ends up allocating is 50MB private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024; + private static final int TEST_BUFFER_LIMIT = 10 * 1024 * 1024; public void testResponseProcessing() throws Exception { ContentDecoder contentDecoder = mock(ContentDecoder.class); IOControl ioControl = mock(IOControl.class); HttpContext httpContext = mock(HttpContext.class); - HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer()); + HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT)); ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); @@ -74,8 +74,8 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { } public void testDefaultBufferLimit() throws Exception { - HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(); - bufferLimitTest(consumer, DEFAULT_BUFFER_LIMIT); + HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT); + bufferLimitTest(consumer, TEST_BUFFER_LIMIT); } public void testConfiguredBufferLimit() throws Exception { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index 789f2bf6f6d..17c2a158ea8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.http.HttpEntity; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; @@ -62,7 +62,7 @@ public class RequestLoggerTests extends RestClientTestCase { } HttpRequestBase request; - int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7); + int requestType = RandomNumbers.randomIntBetween(getRandom(), 0, 7); switch(requestType) { case 0: request = new HttpGetWithEntity(uri); @@ -99,7 +99,7 @@ public class RequestLoggerTests extends RestClientTestCase { expected += " -d '" + requestBody + "'"; HttpEntityEnclosingRequest 
enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; - switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) { + switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: entity = new StringEntity(requestBody, StandardCharsets.UTF_8); break; @@ -128,12 +128,12 @@ public class RequestLoggerTests extends RestClientTestCase { public void testTraceResponse() throws IOException { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - int statusCode = RandomInts.randomIntBetween(getRandom(), 200, 599); + int statusCode = RandomNumbers.randomIntBetween(getRandom(), 200, 599); String reasonPhrase = "REASON"; BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); String expected = "# " + statusLine.toString(); BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); - int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3); + int numHeaders = RandomNumbers.randomIntBetween(getRandom(), 0, 3); for (int i = 0; i < numHeaders; i++) { httpResponse.setHeader("header" + i, "value"); expected += "\n# header" + i + ": value"; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java new file mode 100644 index 00000000000..f997f798712 --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -0,0 +1,210 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.HttpHost; +import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; +import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; +import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. 
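+ * The failover part works by stopping one of the underlying servers at random before each test
+ * (when more than one is still running) and verifying that requests keep succeeding against the
+ * remaining hosts.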
+ */ +//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes +@IgnoreJRERequirement +public class RestClientMultipleHostsIntegTests extends RestClientTestCase { + + private static HttpServer[] httpServers; + private static RestClient restClient; + private static String pathPrefix; + + @BeforeClass + public static void startHttpServer() throws Exception { + String pathPrefixWithoutLeadingSlash; + if (randomBoolean()) { + pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); + pathPrefix = "/" + pathPrefixWithoutLeadingSlash; + } else { + pathPrefix = pathPrefixWithoutLeadingSlash = ""; + } + int numHttpServers = randomIntBetween(2, 4); + httpServers = new HttpServer[numHttpServers]; + HttpHost[] httpHosts = new HttpHost[numHttpServers]; + for (int i = 0; i < numHttpServers; i++) { + HttpServer httpServer = createHttpServer(); + httpServers[i] = httpServer; + httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + } + RestClientBuilder restClientBuilder = RestClient.builder(httpHosts); + if (pathPrefix.length() > 0) { + restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash); + } + restClient = restClientBuilder.build(); + } + + private static HttpServer createHttpServer() throws Exception { + HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.start(); + //returns a different status code depending on the path + for (int statusCode : getAllStatusCodes()) { + httpServer.createContext(pathPrefix + "/" + statusCode, new ResponseHandler(statusCode)); + } + return httpServer; + } + + //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes + @IgnoreJRERequirement + private static class ResponseHandler implements HttpHandler { + private final int statusCode; + + ResponseHandler(int statusCode) { + this.statusCode = statusCode; + } + + @Override + public void handle(HttpExchange httpExchange) throws IOException { + httpExchange.getRequestBody().close(); + httpExchange.sendResponseHeaders(statusCode, -1); + httpExchange.close(); + } + } + + @AfterClass + public static void stopHttpServers() throws IOException { + restClient.close(); + restClient = null; + for (HttpServer httpServer : httpServers) { + httpServer.stop(0); + } + httpServers = null; + } + + @Before + public void stopRandomHost() { + //verify that shutting down some hosts doesn't matter as long as one working host is left behind + if (httpServers.length > 1 && randomBoolean()) { + List updatedHttpServers = new ArrayList<>(httpServers.length - 1); + int nodeIndex = randomInt(httpServers.length - 1); + for (int i = 0; i < httpServers.length; i++) { + HttpServer httpServer = httpServers[i]; + if (i == nodeIndex) { + httpServer.stop(0); + } else { + updatedHttpServers.add(httpServer); + } + } + httpServers = updatedHttpServers.toArray(new HttpServer[updatedHttpServers.size()]); + } + } + + public void testSyncRequests() throws IOException { + int numRequests = randomIntBetween(5, 20); + for (int i = 0; i < numRequests; i++) { + final String method = RestClientTestUtil.randomHttpMethod(getRandom()); + //we don't test status codes that are subject to retries as they interfere with hosts being stopped + final int statusCode = randomBoolean() ? 
randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom()); + Response response; + try { + response = restClient.performRequest(method, "/" + statusCode); + } catch(ResponseException responseException) { + response = responseException.getResponse(); + } + assertEquals(method, response.getRequestLine().getMethod()); + assertEquals(statusCode, response.getStatusLine().getStatusCode()); + assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, response.getRequestLine().getUri()); + } + } + + public void testAsyncRequests() throws Exception { + int numRequests = randomIntBetween(5, 20); + final CountDownLatch latch = new CountDownLatch(numRequests); + final List responses = new CopyOnWriteArrayList<>(); + for (int i = 0; i < numRequests; i++) { + final String method = RestClientTestUtil.randomHttpMethod(getRandom()); + //we don't test status codes that are subject to retries as they interfere with hosts being stopped + final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom()); + restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() { + @Override + public void onSuccess(Response response) { + responses.add(new TestResponse(method, statusCode, response)); + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + responses.add(new TestResponse(method, statusCode, exception)); + latch.countDown(); + } + }); + } + assertTrue(latch.await(5, TimeUnit.SECONDS)); + + assertEquals(numRequests, responses.size()); + for (TestResponse testResponse : responses) { + Response response = testResponse.getResponse(); + assertEquals(testResponse.method, response.getRequestLine().getMethod()); + assertEquals(testResponse.statusCode, response.getStatusLine().getStatusCode()); + assertEquals((pathPrefix.length() > 0 ? 
pathPrefix : "") + "/" + testResponse.statusCode, + response.getRequestLine().getUri()); + } + } + + private static class TestResponse { + private final String method; + private final int statusCode; + private final Object response; + + TestResponse(String method, int statusCode, Object response) { + this.method = method; + this.statusCode = statusCode; + this.response = response; + } + + Response getResponse() { + if (response instanceof Response) { + return (Response) response; + } + if (response instanceof ResponseException) { + return ((ResponseException) response).getResponse(); + } + throw new AssertionError("unexpected response " + response.getClass()); + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index 049a216936f..90ee4431009 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; @@ -95,7 +95,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { return null; } }); - int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5); + int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5); httpHosts = new HttpHost[numHosts]; for (int i = 0; i < numHosts; i++) { httpHosts[i] = new HttpHost("localhost", 9200 + i); @@ -105,7 +105,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinOkStatusCodes() throws IOException { - int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = new HashSet<>(); Collections.addAll(hostsSet, httpHosts); @@ -121,7 +121,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinNoRetryErrors() throws IOException { - int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = new HashSet<>(); Collections.addAll(hostsSet, httpHosts); @@ -198,7 +198,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); } - int numIters = RandomInts.randomIntBetween(getRandom(), 2, 5); + int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5); for (int i = 1; i <= numIters; i++) { //check that one different host is resurrected at each new attempt Set hostsSet = new HashSet<>(); @@ -228,7 +228,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { if (getRandom().nextBoolean()) { //mark one host back alive through a successful request and check that all requests after that are sent to it HttpHost selectedHost = null; - int iters = RandomInts.randomIntBetween(getRandom(), 2, 10); + int iters = RandomNumbers.randomIntBetween(getRandom(), 2, 10); for (int y = 0; y < iters; y++) { int statusCode = randomErrorNoRetryStatusCode(getRandom()); Response response; @@ -269,7 +269,7 @@ public class 
RestClientMultipleHostsTests extends RestClientTestCase { } private static String randomErrorRetryEndpoint() { - switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) { + switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: return "/" + randomErrorRetryStatusCode(getRandom()); case 1: diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java similarity index 64% rename from client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java rename to client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 9c5c50946d8..4440c1e8f97 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import com.sun.net.httpserver.Headers; -import com.sun.net.httpserver.HttpContext; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; @@ -45,19 +44,13 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; -import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; /** * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. @@ -65,28 +58,42 @@ import static org.junit.Assert.fail; */ //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes @IgnoreJRERequirement -public class RestClientIntegTests extends RestClientTestCase { +public class RestClientSingleHostIntegTests extends RestClientTestCase { private static HttpServer httpServer; private static RestClient restClient; + private static String pathPrefix; private static Header[] defaultHeaders; @BeforeClass public static void startHttpServer() throws Exception { - httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + String pathPrefixWithoutLeadingSlash; + if (randomBoolean()) { + pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); + pathPrefix = "/" + pathPrefixWithoutLeadingSlash; + } else { + pathPrefix = pathPrefixWithoutLeadingSlash = ""; + } + + httpServer = createHttpServer(); + int numHeaders = randomIntBetween(0, 5); + defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders); + RestClientBuilder restClientBuilder = RestClient.builder( + new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())).setDefaultHeaders(defaultHeaders); + if (pathPrefix.length() > 0) { + restClientBuilder.setPathPrefix((randomBoolean() ? 
"/" : "") + pathPrefixWithoutLeadingSlash); + } + restClient = restClientBuilder.build(); + } + + private static HttpServer createHttpServer() throws Exception { + HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); //returns a different status code depending on the path for (int statusCode : getAllStatusCodes()) { - createStatusCodeContext(httpServer, statusCode); + httpServer.createContext(pathPrefix + "/" + statusCode, new ResponseHandler(statusCode)); } - int numHeaders = randomIntBetween(0, 5); - defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders); - restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) - .setDefaultHeaders(defaultHeaders).build(); - } - - private static void createStatusCodeContext(HttpServer httpServer, final int statusCode) { - httpServer.createContext("/" + statusCode, new ResponseHandler(statusCode)); + return httpServer; } //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes @@ -157,7 +164,11 @@ public class RestClientIntegTests extends RestClientTestCase { } catch(ResponseException e) { esResponse = e.getResponse(); } - assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); + + assertEquals(method, esResponse.getRequestLine().getMethod()); + assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); + assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, esResponse.getRequestLine().getUri()); + for (final Header responseHeader : esResponse.getHeaders()) { final String name = responseHeader.getName(); final String value = responseHeader.getValue(); @@ -197,38 +208,6 @@ public class RestClientIntegTests extends RestClientTestCase { bodyTest("GET"); } - /** - * Ensure that pathPrefix works as expected. - */ - public void testPathPrefix() throws IOException { - // guarantee no other test setup collides with this one and lets it sneak through - final String uniqueContextSuffix = "/testPathPrefix"; - final String pathPrefix = "base/" + randomAsciiOfLengthBetween(1, 5) + "/"; - final int statusCode = randomStatusCode(getRandom()); - - final HttpContext context = - httpServer.createContext("/" + pathPrefix + statusCode + uniqueContextSuffix, new ResponseHandler(statusCode)); - - try (final RestClient client = - RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) - .setPathPrefix((randomBoolean() ? 
"/" : "") + pathPrefix).build()) { - - for (final String method : getHttpMethods()) { - Response esResponse; - try { - esResponse = client.performRequest(method, "/" + statusCode + uniqueContextSuffix); - } catch(ResponseException e) { - esResponse = e.getResponse(); - } - - assertThat(esResponse.getRequestLine().getUri(), equalTo("/" + pathPrefix + statusCode + uniqueContextSuffix)); - assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); - } - } finally { - httpServer.removeContext(context); - } - } - private void bodyTest(String method) throws IOException { String requestBody = "{ \"field\": \"value\" }"; StringEntity entity = new StringEntity(requestBody); @@ -239,60 +218,9 @@ public class RestClientIntegTests extends RestClientTestCase { } catch(ResponseException e) { esResponse = e.getResponse(); } + assertEquals(method, esResponse.getRequestLine().getMethod()); assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); + assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, esResponse.getRequestLine().getUri()); assertEquals(requestBody, EntityUtils.toString(esResponse.getEntity())); } - - public void testAsyncRequests() throws Exception { - int numRequests = randomIntBetween(5, 20); - final CountDownLatch latch = new CountDownLatch(numRequests); - final List responses = new CopyOnWriteArrayList<>(); - for (int i = 0; i < numRequests; i++) { - final String method = RestClientTestUtil.randomHttpMethod(getRandom()); - final int statusCode = randomStatusCode(getRandom()); - restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() { - @Override - public void onSuccess(Response response) { - responses.add(new TestResponse(method, statusCode, response)); - latch.countDown(); - } - - @Override - public void onFailure(Exception exception) { - responses.add(new TestResponse(method, statusCode, exception)); - latch.countDown(); - } - }); - } - assertTrue(latch.await(5, TimeUnit.SECONDS)); - - assertEquals(numRequests, responses.size()); - for (TestResponse response : responses) { - assertEquals(response.method, response.getResponse().getRequestLine().getMethod()); - assertEquals(response.statusCode, response.getResponse().getStatusLine().getStatusCode()); - - } - } - - private static class TestResponse { - private final String method; - private final int statusCode; - private final Object response; - - TestResponse(String method, int statusCode, Object response) { - this.method = method; - this.statusCode = statusCode; - this.response = response; - } - - Response getResponse() { - if (response instanceof Response) { - return (Response) response; - } - if (response instanceof ResponseException) { - return ((ResponseException) response).getResponse(); - } - throw new AssertionError("unexpected response " + response.getClass()); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 92e2b0da971..ce0d6d0936e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -139,6 +139,17 @@ public class RestClientSingleHostTests extends RestClientTestCase { restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); } + public void testNullPath() throws IOException { + for (String method : getHttpMethods()) { + 
try { + restClient.performRequest(method, null); + fail("path set to null should fail!"); + } catch (NullPointerException e) { + assertEquals("path must not be null", e.getMessage()); + } + } + } + /** * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client */ diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java index a926cabb87d..aeb0620134b 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.fasterxml.jackson.core.JsonFactory; @@ -69,7 +69,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { @Before public void startHttpServer() throws IOException { - this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000); + this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000); this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values()); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); @@ -101,7 +101,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { assertEquals(e.getMessage(), "scheme cannot be null"); } try { - new ElasticsearchHostsSniffer(restClient, RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), + new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), ElasticsearchHostsSniffer.Scheme.HTTP); fail("should have failed"); } catch (IllegalArgumentException e) { @@ -175,7 +175,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException { - int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); List hosts = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); StringWriter writer = new StringWriter(); @@ -205,7 +205,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { boolean isHttpEnabled = rarely() == false; if (isHttpEnabled) { String host = "host" + i; - int port = RandomInts.randomIntBetween(getRandom(), 9200, 9299); + int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); HttpHost httpHost = new HttpHost(host, port, scheme.toString()); hosts.add(httpHost); generator.writeObjectFieldStart("http"); @@ -228,7 +228,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } if (getRandom().nextBoolean()) { String[] roles = {"master", "data", "ingest"}; - int numRoles = RandomInts.randomIntBetween(getRandom(), 0, 3); + int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3); Set nodeRoles = new HashSet<>(numRoles); for (int j = 0; j < numRoles; j++) { String role; @@ -242,7 +242,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } generator.writeEndArray(); } - int 
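(Review note: testNullPath above pins down both the exception type and the message. A guard of roughly this shape would satisfy it; that the client uses Objects.requireNonNull rather than a hand-rolled check is an assumption of this sketch:)

import java.util.Objects;

class PathGuardSketch {
    // Objects.requireNonNull throws NullPointerException carrying exactly
    // the supplied message, which is what the test's assertEquals verifies.
    void performRequest(String method, String path) {
        Objects.requireNonNull(path, "path must not be null");
        // ... build the URI from the (possibly prefixed) path and send ...
    }
}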
numAttributes = RandomInts.randomIntBetween(getRandom(), 0, 3); + int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); Map attributes = new HashMap<>(numAttributes); for (int j = 0; j < numAttributes; j++) { attributes.put("attr" + j, "value" + j); @@ -291,6 +291,6 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } private static int randomErrorResponseCode() { - return RandomInts.randomIntBetween(getRandom(), 400, 599); + return RandomNumbers.randomIntBetween(getRandom(), 400, 599); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index b0c387d733a..9a7359e9c72 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.http.HttpHost; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; @@ -31,7 +31,7 @@ import static org.junit.Assert.fail; public class SnifferBuilderTests extends RestClientTestCase { public void testBuild() throws Exception { - int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { hosts[i] = new HttpHost("localhost", 9200 + i); @@ -46,14 +46,14 @@ public class SnifferBuilderTests extends RestClientTestCase { } try { - Sniffer.builder(client).setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client).setSniffIntervalMillis(RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage()); } try { - Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); @@ -74,10 +74,10 @@ public class SnifferBuilderTests extends RestClientTestCase { SnifferBuilder builder = Sniffer.builder(client); if (getRandom().nextBoolean()) { - builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); + builder.setSniffIntervalMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { - builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); + builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { builder.setHostsSniffer(new MockHostsSniffer()); diff --git a/core/build.gradle b/core/build.gradle index ea2e3e27ef3..7a580335571 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -62,10 +62,7 @@ dependencies { compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time - compile 'joda-time:joda-time:2.9.4' - // 
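(Review note: the RandomInts to RandomNumbers changes in this file and the ones above appear to be a straight class rename tracking upstream randomizedtesting; the method name and (Random, min, max) signature are unchanged, as this before/after sketch shows:)

import java.util.Random;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;

class RandomRenameSketch {
    int pickPort(Random random) {
        // before: RandomInts.randomIntBetween(random, 9200, 9299);
        return RandomNumbers.randomIntBetween(random, 9200, 9299); // bounds inclusive
    }
}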
joda 2.0 moved to using volatile fields for datetime - // When updating to a new version, make sure to update our copy of BaseDateTime - compile 'org.joda:joda-convert:1.2' + compile 'joda-time:joda-time:2.9.5' // json and yaml compile "org.yaml:snakeyaml:${versions.snakeyaml}" @@ -158,6 +155,10 @@ thirdPartyAudit.excludes = [ 'com.fasterxml.jackson.databind.ObjectMapper', // from log4j + 'com.beust.jcommander.IStringConverter', + 'com.beust.jcommander.JCommander', + 'com.conversantmedia.util.concurrent.DisruptorBlockingQueue', + 'com.conversantmedia.util.concurrent.SpinPolicy', 'com.fasterxml.jackson.annotation.JsonInclude$Include', 'com.fasterxml.jackson.databind.DeserializationContext', 'com.fasterxml.jackson.databind.JsonMappingException', @@ -176,6 +177,10 @@ thirdPartyAudit.excludes = [ 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', 'com.fasterxml.jackson.dataformat.xml.XmlMapper', 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', + 'com.fasterxml.jackson.databind.node.JsonNodeFactory', + 'com.fasterxml.jackson.databind.node.ObjectNode', + 'org.fusesource.jansi.Ansi', + 'org.fusesource.jansi.AnsiRenderer$Code', 'com.lmax.disruptor.BlockingWaitStrategy', 'com.lmax.disruptor.BusySpinWaitStrategy', 'com.lmax.disruptor.EventFactory', @@ -228,6 +233,8 @@ thirdPartyAudit.excludes = [ 'org.apache.kafka.clients.producer.Producer', 'org.apache.kafka.clients.producer.ProducerRecord', 'org.codehaus.stax2.XMLStreamWriter2', + 'org.jctools.queues.MessagePassingQueue$Consumer', + 'org.jctools.queues.MpscArrayQueue', 'org.osgi.framework.AdaptPermission', 'org.osgi.framework.AdminPermission', 'org.osgi.framework.Bundle', @@ -247,8 +254,10 @@ thirdPartyAudit.excludes = [ 'org.noggit.JSONParser', ] -// dependency license are currently checked in distribution -dependencyLicenses.enabled = false +dependencyLicenses { + mapping from: /lucene-.*/, to: 'lucene' + mapping from: /jackson-.*/, to: 'jackson' +} if (isEclipse == false || project.path == ":core-tests") { task integTest(type: RandomizedTestingTask, diff --git a/distribution/licenses/HdrHistogram-2.1.6.jar.sha1 b/core/licenses/HdrHistogram-2.1.6.jar.sha1 similarity index 100% rename from distribution/licenses/HdrHistogram-2.1.6.jar.sha1 rename to core/licenses/HdrHistogram-2.1.6.jar.sha1 diff --git a/distribution/licenses/HdrHistogram-LICENSE.txt b/core/licenses/HdrHistogram-LICENSE.txt similarity index 100% rename from distribution/licenses/HdrHistogram-LICENSE.txt rename to core/licenses/HdrHistogram-LICENSE.txt diff --git a/distribution/licenses/HdrHistogram-NOTICE.txt b/core/licenses/HdrHistogram-NOTICE.txt similarity index 100% rename from distribution/licenses/HdrHistogram-NOTICE.txt rename to core/licenses/HdrHistogram-NOTICE.txt diff --git a/distribution/licenses/apache-log4j-extras-DEPENDENCIES b/core/licenses/apache-log4j-extras-DEPENDENCIES similarity index 100% rename from distribution/licenses/apache-log4j-extras-DEPENDENCIES rename to core/licenses/apache-log4j-extras-DEPENDENCIES diff --git a/distribution/licenses/hppc-0.7.1.jar.sha1 b/core/licenses/hppc-0.7.1.jar.sha1 similarity index 100% rename from distribution/licenses/hppc-0.7.1.jar.sha1 rename to core/licenses/hppc-0.7.1.jar.sha1 diff --git a/distribution/licenses/hppc-LICENSE.txt b/core/licenses/hppc-LICENSE.txt similarity index 100% rename from distribution/licenses/hppc-LICENSE.txt rename to core/licenses/hppc-LICENSE.txt diff --git a/distribution/licenses/hppc-NOTICE.txt b/core/licenses/hppc-NOTICE.txt similarity index 100% rename from 
distribution/licenses/hppc-NOTICE.txt rename to core/licenses/hppc-NOTICE.txt diff --git a/distribution/licenses/jackson-LICENSE b/core/licenses/jackson-LICENSE similarity index 100% rename from distribution/licenses/jackson-LICENSE rename to core/licenses/jackson-LICENSE diff --git a/distribution/licenses/jackson-NOTICE b/core/licenses/jackson-NOTICE similarity index 100% rename from distribution/licenses/jackson-NOTICE rename to core/licenses/jackson-NOTICE diff --git a/distribution/licenses/jackson-core-2.8.1.jar.sha1 b/core/licenses/jackson-core-2.8.1.jar.sha1 similarity index 100% rename from distribution/licenses/jackson-core-2.8.1.jar.sha1 rename to core/licenses/jackson-core-2.8.1.jar.sha1 diff --git a/distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 b/core/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 similarity index 100% rename from distribution/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 rename to core/licenses/jackson-dataformat-cbor-2.8.1.jar.sha1 diff --git a/distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 b/core/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 similarity index 100% rename from distribution/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 rename to core/licenses/jackson-dataformat-smile-2.8.1.jar.sha1 diff --git a/distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 b/core/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 similarity index 100% rename from distribution/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 rename to core/licenses/jackson-dataformat-yaml-2.8.1.jar.sha1 diff --git a/distribution/licenses/jna-4.2.2.jar.sha1 b/core/licenses/jna-4.2.2.jar.sha1 similarity index 100% rename from distribution/licenses/jna-4.2.2.jar.sha1 rename to core/licenses/jna-4.2.2.jar.sha1 diff --git a/distribution/licenses/jna-LICENSE.txt b/core/licenses/jna-LICENSE.txt similarity index 100% rename from distribution/licenses/jna-LICENSE.txt rename to core/licenses/jna-LICENSE.txt diff --git a/distribution/licenses/jna-NOTICE.txt b/core/licenses/jna-NOTICE.txt similarity index 100% rename from distribution/licenses/jna-NOTICE.txt rename to core/licenses/jna-NOTICE.txt diff --git a/core/licenses/joda-time-2.9.5.jar.sha1 b/core/licenses/joda-time-2.9.5.jar.sha1 new file mode 100644 index 00000000000..ecf1c781556 --- /dev/null +++ b/core/licenses/joda-time-2.9.5.jar.sha1 @@ -0,0 +1 @@ +5f01da7306363fad2028b916f3eab926262de928 \ No newline at end of file diff --git a/distribution/licenses/joda-time-LICENSE.txt b/core/licenses/joda-time-LICENSE.txt similarity index 100% rename from distribution/licenses/joda-time-LICENSE.txt rename to core/licenses/joda-time-LICENSE.txt diff --git a/distribution/licenses/joda-time-NOTICE.txt b/core/licenses/joda-time-NOTICE.txt similarity index 100% rename from distribution/licenses/joda-time-NOTICE.txt rename to core/licenses/joda-time-NOTICE.txt diff --git a/distribution/licenses/jopt-simple-5.0.2.jar.sha1 b/core/licenses/jopt-simple-5.0.2.jar.sha1 similarity index 100% rename from distribution/licenses/jopt-simple-5.0.2.jar.sha1 rename to core/licenses/jopt-simple-5.0.2.jar.sha1 diff --git a/distribution/licenses/jopt-simple-LICENSE.txt b/core/licenses/jopt-simple-LICENSE.txt similarity index 100% rename from distribution/licenses/jopt-simple-LICENSE.txt rename to core/licenses/jopt-simple-LICENSE.txt diff --git a/distribution/licenses/jopt-simple-NOTICE.txt b/core/licenses/jopt-simple-NOTICE.txt similarity index 100% rename from distribution/licenses/jopt-simple-NOTICE.txt rename to 
core/licenses/jopt-simple-NOTICE.txt diff --git a/distribution/licenses/jts-1.13.jar.sha1 b/core/licenses/jts-1.13.jar.sha1 similarity index 100% rename from distribution/licenses/jts-1.13.jar.sha1 rename to core/licenses/jts-1.13.jar.sha1 diff --git a/distribution/licenses/jts-LICENSE.txt b/core/licenses/jts-LICENSE.txt similarity index 100% rename from distribution/licenses/jts-LICENSE.txt rename to core/licenses/jts-LICENSE.txt diff --git a/distribution/licenses/jts-NOTICE.txt b/core/licenses/jts-NOTICE.txt similarity index 100% rename from distribution/licenses/jts-NOTICE.txt rename to core/licenses/jts-NOTICE.txt diff --git a/core/licenses/log4j-1.2-api-2.7.jar.sha1 b/core/licenses/log4j-1.2-api-2.7.jar.sha1 new file mode 100644 index 00000000000..f3644414148 --- /dev/null +++ b/core/licenses/log4j-1.2-api-2.7.jar.sha1 @@ -0,0 +1 @@ +39f4e6c2d68d4ef8fd4b0883d165682dedd5be52 \ No newline at end of file diff --git a/distribution/licenses/log4j-LICENSE.txt b/core/licenses/log4j-LICENSE.txt similarity index 100% rename from distribution/licenses/log4j-LICENSE.txt rename to core/licenses/log4j-LICENSE.txt diff --git a/distribution/licenses/log4j-NOTICE.txt b/core/licenses/log4j-NOTICE.txt similarity index 100% rename from distribution/licenses/log4j-NOTICE.txt rename to core/licenses/log4j-NOTICE.txt diff --git a/core/licenses/log4j-api-2.7.jar.sha1 b/core/licenses/log4j-api-2.7.jar.sha1 new file mode 100644 index 00000000000..8f676d9dbdd --- /dev/null +++ b/core/licenses/log4j-api-2.7.jar.sha1 @@ -0,0 +1 @@ +8de00e382a817981b737be84cb8def687d392963 \ No newline at end of file diff --git a/distribution/licenses/log4j-api-LICENSE.txt b/core/licenses/log4j-api-LICENSE.txt similarity index 100% rename from distribution/licenses/log4j-api-LICENSE.txt rename to core/licenses/log4j-api-LICENSE.txt diff --git a/distribution/licenses/log4j-api-NOTICE.txt b/core/licenses/log4j-api-NOTICE.txt similarity index 100% rename from distribution/licenses/log4j-api-NOTICE.txt rename to core/licenses/log4j-api-NOTICE.txt diff --git a/core/licenses/log4j-core-2.7.jar.sha1 b/core/licenses/log4j-core-2.7.jar.sha1 new file mode 100644 index 00000000000..07bb057a984 --- /dev/null +++ b/core/licenses/log4j-core-2.7.jar.sha1 @@ -0,0 +1 @@ +a3f2b4e64c61a7fc1ed8f1e5ba371933404ed98a \ No newline at end of file diff --git a/distribution/licenses/log4j-core-LICENSE.txt b/core/licenses/log4j-core-LICENSE.txt similarity index 100% rename from distribution/licenses/log4j-core-LICENSE.txt rename to core/licenses/log4j-core-LICENSE.txt diff --git a/distribution/licenses/log4j-core-NOTICE.txt b/core/licenses/log4j-core-NOTICE.txt similarity index 100% rename from distribution/licenses/log4j-core-NOTICE.txt rename to core/licenses/log4j-core-NOTICE.txt diff --git a/distribution/licenses/lucene-LICENSE.txt b/core/licenses/lucene-LICENSE.txt similarity index 100% rename from distribution/licenses/lucene-LICENSE.txt rename to core/licenses/lucene-LICENSE.txt diff --git a/distribution/licenses/lucene-NOTICE.txt b/core/licenses/lucene-NOTICE.txt similarity index 100% rename from distribution/licenses/lucene-NOTICE.txt rename to core/licenses/lucene-NOTICE.txt diff --git a/core/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..1626a88f4a2 --- /dev/null +++ b/core/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +61aacb657e44a9beabf95834e106bbb96373a703 \ No newline at end of 
file diff --git a/core/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..2f45d50eeee --- /dev/null +++ b/core/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +600de75a81e259cab0384e546d9a1d527ddba6d6 \ No newline at end of file diff --git a/core/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..9dcdbeb40e9 --- /dev/null +++ b/core/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +188774468a56a8731ca639527d721060d26ffebd \ No newline at end of file diff --git a/core/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..14c8d7aa2b7 --- /dev/null +++ b/core/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +5afd9271e3d8f645440f48ff2487545ae5573e7e \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..e695284756d --- /dev/null +++ b/core/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +0f575175e26d4d3b1095f6300cbefbbb3ee994cd \ No newline at end of file diff --git a/core/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..ad02b0cac3b --- /dev/null +++ b/core/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ee898c3d318681c9f29c56e6d9b52876be96d814 \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..0e36d650670 --- /dev/null +++ b/core/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ea6defd322456711394b4dabcda70a217e3caacd \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..e458570651a --- /dev/null +++ b/core/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ea2de7f9753a8e19a1ec9f25a3ea65d7ce909a0e \ No newline at end of file diff --git a/core/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..1231424e3be --- /dev/null +++ b/core/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +0b15c6f29bfb9ec14a4615013a94bfa43a63793d \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..a367f4e45cf --- /dev/null +++ b/core/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +d89d9fa1036c38144e0b8db079ae959353847c86 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..4c8874c0b4b --- /dev/null +++ b/core/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +c003c1ab0a19a02b30156ce13372cff1001d6a7d \ No newline at 
end of file diff --git a/core/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..75dd8263828 --- /dev/null +++ b/core/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +a3c570bf588d7c9ca43d074db9ce9c9b8408b930 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..debd8e0b873 --- /dev/null +++ b/core/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +de54ca61f5892cf2c88ac083b3332a827beca7ff \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..b9eb9a0c270 --- /dev/null +++ b/core/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +cacdf81b324acd335be63798d5a3dd16e7dff9a3 \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..a6517bc7d42 --- /dev/null +++ b/core/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +a5cb3723bc8e0db185fc43e57b648145de27fde8 \ No newline at end of file diff --git a/distribution/licenses/securesm-1.1.jar.sha1 b/core/licenses/securesm-1.1.jar.sha1 similarity index 100% rename from distribution/licenses/securesm-1.1.jar.sha1 rename to core/licenses/securesm-1.1.jar.sha1 diff --git a/distribution/licenses/securesm-LICENSE.txt b/core/licenses/securesm-LICENSE.txt similarity index 100% rename from distribution/licenses/securesm-LICENSE.txt rename to core/licenses/securesm-LICENSE.txt diff --git a/distribution/licenses/securesm-NOTICE.txt b/core/licenses/securesm-NOTICE.txt similarity index 100% rename from distribution/licenses/securesm-NOTICE.txt rename to core/licenses/securesm-NOTICE.txt diff --git a/distribution/licenses/snakeyaml-1.15.jar.sha1 b/core/licenses/snakeyaml-1.15.jar.sha1 similarity index 100% rename from distribution/licenses/snakeyaml-1.15.jar.sha1 rename to core/licenses/snakeyaml-1.15.jar.sha1 diff --git a/distribution/licenses/snakeyaml-LICENSE.txt b/core/licenses/snakeyaml-LICENSE.txt similarity index 100% rename from distribution/licenses/snakeyaml-LICENSE.txt rename to core/licenses/snakeyaml-LICENSE.txt diff --git a/distribution/licenses/snakeyaml-NOTICE.txt b/core/licenses/snakeyaml-NOTICE.txt similarity index 100% rename from distribution/licenses/snakeyaml-NOTICE.txt rename to core/licenses/snakeyaml-NOTICE.txt diff --git a/distribution/licenses/spatial4j-0.6.jar.sha1 b/core/licenses/spatial4j-0.6.jar.sha1 similarity index 100% rename from distribution/licenses/spatial4j-0.6.jar.sha1 rename to core/licenses/spatial4j-0.6.jar.sha1 diff --git a/distribution/licenses/spatial4j-ABOUT.txt b/core/licenses/spatial4j-ABOUT.txt similarity index 100% rename from distribution/licenses/spatial4j-ABOUT.txt rename to core/licenses/spatial4j-ABOUT.txt diff --git a/distribution/licenses/spatial4j-LICENSE.txt b/core/licenses/spatial4j-LICENSE.txt similarity index 100% rename from distribution/licenses/spatial4j-LICENSE.txt rename to core/licenses/spatial4j-LICENSE.txt diff --git a/distribution/licenses/spatial4j-NOTICE.txt b/core/licenses/spatial4j-NOTICE.txt similarity index 100% rename from 
distribution/licenses/spatial4j-NOTICE.txt rename to core/licenses/spatial4j-NOTICE.txt diff --git a/distribution/licenses/t-digest-3.0.jar.sha1 b/core/licenses/t-digest-3.0.jar.sha1 similarity index 100% rename from distribution/licenses/t-digest-3.0.jar.sha1 rename to core/licenses/t-digest-3.0.jar.sha1 diff --git a/distribution/licenses/t-digest-LICENSE.txt b/core/licenses/t-digest-LICENSE.txt similarity index 100% rename from distribution/licenses/t-digest-LICENSE.txt rename to core/licenses/t-digest-LICENSE.txt diff --git a/distribution/licenses/t-digest-NOTICE.txt b/core/licenses/t-digest-NOTICE.txt similarity index 100% rename from distribution/licenses/t-digest-NOTICE.txt rename to core/licenses/t-digest-NOTICE.txt diff --git a/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java b/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java deleted file mode 100644 index 37ab0a15391..00000000000 --- a/core/src/main/java/org/apache/logging/log4j/core/impl/ThrowableProxy.java +++ /dev/null @@ -1,665 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache license, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the license for the specific language governing permissions and - * limitations under the license. - */ - -package org.apache.logging.log4j.core.impl; - -import java.io.Serializable; -import java.net.URL; -import java.security.CodeSource; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; - -import org.apache.logging.log4j.core.util.Loader; -import org.apache.logging.log4j.status.StatusLogger; -import org.apache.logging.log4j.util.ReflectionUtil; -import org.apache.logging.log4j.util.Strings; - -/** - * Wraps a Throwable to add packaging information about each stack trace element. - * - *
<p>
- * A proxy is used to represent a throwable that may not exist in a different class loader or JVM. When an application - * deserializes a ThrowableProxy, the throwable may not be set, but the throwable's information is preserved in other - * fields of the proxy like the message and stack trace. - *
</p>
- * - *
<p>
- * TODO: Move this class to org.apache.logging.log4j.core because it is used from LogEvent. - *
</p>
- *
<p>
- * TODO: Deserialize: Try to rebuild Throwable if the target exception is in this class loader? - *
</p>
- */ -public class ThrowableProxy implements Serializable { - - private static final String CAUSED_BY_LABEL = "Caused by: "; - private static final String SUPPRESSED_LABEL = "Suppressed: "; - private static final String WRAPPED_BY_LABEL = "Wrapped by: "; - - /** - * Cached StackTracePackageElement and ClassLoader. - *
<p>
- * Consider this class private. - *
</p>
- */ - static class CacheEntry { - private final ExtendedClassInfo element; - private final ClassLoader loader; - - public CacheEntry(final ExtendedClassInfo element, final ClassLoader loader) { - this.element = element; - this.loader = loader; - } - } - - private static final ThrowableProxy[] EMPTY_THROWABLE_PROXY_ARRAY = new ThrowableProxy[0]; - - private static final char EOL = '\n'; - - private static final long serialVersionUID = -2752771578252251910L; - - private final ThrowableProxy causeProxy; - - private int commonElementCount; - - private final ExtendedStackTraceElement[] extendedStackTrace; - - private final String localizedMessage; - - private final String message; - - private final String name; - - private final ThrowableProxy[] suppressedProxies; - - private final transient Throwable throwable; - - /** - * For JSON and XML IO via Jackson. - */ - @SuppressWarnings("unused") - private ThrowableProxy() { - this.throwable = null; - this.name = null; - this.extendedStackTrace = null; - this.causeProxy = null; - this.message = null; - this.localizedMessage = null; - this.suppressedProxies = EMPTY_THROWABLE_PROXY_ARRAY; - } - - /** - * Constructs the wrapper for the Throwable that includes packaging data. - * - * @param throwable - * The Throwable to wrap, must not be null. - */ - public ThrowableProxy(final Throwable throwable) { - this(throwable, null); - } - - /** - * Constructs the wrapper for the Throwable that includes packaging data. - * - * @param throwable - * The Throwable to wrap, must not be null. - * @param visited - * The set of visited suppressed exceptions. - */ - private ThrowableProxy(final Throwable throwable, final Set visited) { - this.throwable = throwable; - this.name = throwable.getClass().getName(); - this.message = throwable.getMessage(); - this.localizedMessage = throwable.getLocalizedMessage(); - final Map map = new HashMap<>(); - final Stack> stack = ReflectionUtil.getCurrentStackTrace(); - this.extendedStackTrace = this.toExtendedStackTrace(stack, map, null, throwable.getStackTrace()); - final Throwable throwableCause = throwable.getCause(); - final Set causeVisited = new HashSet<>(1); - this.causeProxy = throwableCause == null ? null : new ThrowableProxy(throwable, stack, map, throwableCause, visited, causeVisited); - this.suppressedProxies = this.toSuppressedProxies(throwable, visited); - } - - /** - * Constructs the wrapper for a Throwable that is referenced as the cause by another Throwable. - * - * @param parent - * The Throwable referencing this Throwable. - * @param stack - * The Class stack. - * @param map - * The cache containing the packaging data. - * @param cause - * The Throwable to wrap. - * @param suppressedVisited TODO - * @param causeVisited TODO - */ - private ThrowableProxy(final Throwable parent, final Stack> stack, final Map map, - final Throwable cause, final Set suppressedVisited, final Set causeVisited) { - causeVisited.add(cause); - this.throwable = cause; - this.name = cause.getClass().getName(); - this.message = this.throwable.getMessage(); - this.localizedMessage = this.throwable.getLocalizedMessage(); - this.extendedStackTrace = this.toExtendedStackTrace(stack, map, parent.getStackTrace(), cause.getStackTrace()); - final Throwable causeCause = cause.getCause(); - this.causeProxy = causeCause == null || causeVisited.contains(causeCause) ? 
null : new ThrowableProxy(parent, - stack, map, causeCause, suppressedVisited, causeVisited); - this.suppressedProxies = this.toSuppressedProxies(cause, suppressedVisited); - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (this.getClass() != obj.getClass()) { - return false; - } - final ThrowableProxy other = (ThrowableProxy) obj; - if (this.causeProxy == null) { - if (other.causeProxy != null) { - return false; - } - } else if (!this.causeProxy.equals(other.causeProxy)) { - return false; - } - if (this.commonElementCount != other.commonElementCount) { - return false; - } - if (this.name == null) { - if (other.name != null) { - return false; - } - } else if (!this.name.equals(other.name)) { - return false; - } - if (!Arrays.equals(this.extendedStackTrace, other.extendedStackTrace)) { - return false; - } - if (!Arrays.equals(this.suppressedProxies, other.suppressedProxies)) { - return false; - } - return true; - } - - private void formatCause(final StringBuilder sb, final String prefix, final ThrowableProxy cause, final List ignorePackages) { - formatThrowableProxy(sb, prefix, CAUSED_BY_LABEL, cause, ignorePackages); - } - - private void formatThrowableProxy(final StringBuilder sb, final String prefix, final String causeLabel, - final ThrowableProxy throwableProxy, final List ignorePackages) { - if (throwableProxy == null) { - return; - } - sb.append(prefix).append(causeLabel).append(throwableProxy).append(EOL); - this.formatElements(sb, prefix, throwableProxy.commonElementCount, - throwableProxy.getStackTrace(), throwableProxy.extendedStackTrace, ignorePackages); - this.formatSuppressed(sb, prefix + "\t", throwableProxy.suppressedProxies, ignorePackages); - this.formatCause(sb, prefix, throwableProxy.causeProxy, ignorePackages); - } - - private void formatSuppressed(final StringBuilder sb, final String prefix, final ThrowableProxy[] suppressedProxies, - final List ignorePackages) { - if (suppressedProxies == null) { - return; - } - for (final ThrowableProxy suppressedProxy : suppressedProxies) { - final ThrowableProxy cause = suppressedProxy; - formatThrowableProxy(sb, prefix, SUPPRESSED_LABEL, cause, ignorePackages); - } - } - - private void formatElements(final StringBuilder sb, final String prefix, final int commonCount, - final StackTraceElement[] causedTrace, final ExtendedStackTraceElement[] extStackTrace, - final List ignorePackages) { - if (ignorePackages == null || ignorePackages.isEmpty()) { - for (final ExtendedStackTraceElement element : extStackTrace) { - this.formatEntry(element, sb, prefix); - } - } else { - int count = 0; - for (int i = 0; i < extStackTrace.length; ++i) { - if (!this.ignoreElement(causedTrace[i], ignorePackages)) { - if (count > 0) { - appendSuppressedCount(sb, prefix, count); - count = 0; - } - this.formatEntry(extStackTrace[i], sb, prefix); - } else { - ++count; - } - } - if (count > 0) { - appendSuppressedCount(sb, prefix, count); - } - } - if (commonCount != 0) { - sb.append(prefix).append("\t... ").append(commonCount).append(" more").append(EOL); - } - } - - private void appendSuppressedCount(final StringBuilder sb, final String prefix, final int count) { - sb.append(prefix); - if (count == 1) { - sb.append("\t....").append(EOL); - } else { - sb.append("\t... 
suppressed ").append(count).append(" lines").append(EOL); - } - } - - private void formatEntry(final ExtendedStackTraceElement extStackTraceElement, final StringBuilder sb, final String prefix) { - sb.append(prefix); - sb.append("\tat "); - sb.append(extStackTraceElement); - sb.append(EOL); - } - - /** - * Formats the specified Throwable. - * - * @param sb - * StringBuilder to contain the formatted Throwable. - * @param cause - * The Throwable to format. - */ - public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause) { - this.formatWrapper(sb, cause, null); - } - - /** - * Formats the specified Throwable. - * - * @param sb - * StringBuilder to contain the formatted Throwable. - * @param cause - * The Throwable to format. - * @param packages - * The List of packages to be suppressed from the trace. - */ - @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause, final List packages) { - final Throwable caused = cause.getCauseProxy() != null ? cause.getCauseProxy().getThrowable() : null; - if (caused != null) { - this.formatWrapper(sb, cause.causeProxy); - sb.append(WRAPPED_BY_LABEL); - } - sb.append(cause).append(EOL); - this.formatElements(sb, "", cause.commonElementCount, - cause.getThrowable().getStackTrace(), cause.extendedStackTrace, packages); - } - - public ThrowableProxy getCauseProxy() { - return this.causeProxy; - } - - /** - * Format the Throwable that is the cause of this Throwable. - * - * @return The formatted Throwable that caused this Throwable. - */ - public String getCauseStackTraceAsString() { - return this.getCauseStackTraceAsString(null); - } - - /** - * Format the Throwable that is the cause of this Throwable. - * - * @param packages - * The List of packages to be suppressed from the trace. - * @return The formatted Throwable that caused this Throwable. - */ - public String getCauseStackTraceAsString(final List packages) { - final StringBuilder sb = new StringBuilder(); - if (this.causeProxy != null) { - this.formatWrapper(sb, this.causeProxy); - sb.append(WRAPPED_BY_LABEL); - } - sb.append(this.toString()); - sb.append(EOL); - this.formatElements(sb, "", 0, this.throwable.getStackTrace(), this.extendedStackTrace, packages); - return sb.toString(); - } - - /** - * Return the number of elements that are being omitted because they are common with the parent Throwable's stack - * trace. - * - * @return The number of elements omitted from the stack trace. - */ - public int getCommonElementCount() { - return this.commonElementCount; - } - - /** - * Gets the stack trace including packaging information. - * - * @return The stack trace including packaging information. - */ - public ExtendedStackTraceElement[] getExtendedStackTrace() { - return this.extendedStackTrace; - } - - /** - * Format the stack trace including packaging information. - * - * @return The formatted stack trace including packaging information. - */ - public String getExtendedStackTraceAsString() { - return this.getExtendedStackTraceAsString(null); - } - - /** - * Format the stack trace including packaging information. - * - * @param ignorePackages - * List of packages to be ignored in the trace. - * @return The formatted stack trace including packaging information. 
- */ - public String getExtendedStackTraceAsString(final List ignorePackages) { - final StringBuilder sb = new StringBuilder(this.name); - final String msg = this.message; - if (msg != null) { - sb.append(": ").append(msg); - } - sb.append(EOL); - final StackTraceElement[] causedTrace = this.throwable != null ? this.throwable.getStackTrace() : null; - this.formatElements(sb, "", 0, causedTrace, this.extendedStackTrace, ignorePackages); - this.formatSuppressed(sb, "\t", this.suppressedProxies, ignorePackages); - this.formatCause(sb, "", this.causeProxy, ignorePackages); - return sb.toString(); - } - - public String getLocalizedMessage() { - return this.localizedMessage; - } - - public String getMessage() { - return this.message; - } - - /** - * Return the FQCN of the Throwable. - * - * @return The FQCN of the Throwable. - */ - public String getName() { - return this.name; - } - - public StackTraceElement[] getStackTrace() { - return this.throwable == null ? null : this.throwable.getStackTrace(); - } - - /** - * Gets proxies for suppressed exceptions. - * - * @return proxies for suppressed exceptions. - */ - public ThrowableProxy[] getSuppressedProxies() { - return this.suppressedProxies; - } - - /** - * Format the suppressed Throwables. - * - * @return The formatted suppressed Throwables. - */ - public String getSuppressedStackTrace() { - final ThrowableProxy[] suppressed = this.getSuppressedProxies(); - if (suppressed == null || suppressed.length == 0) { - return Strings.EMPTY; - } - final StringBuilder sb = new StringBuilder("Suppressed Stack Trace Elements:").append(EOL); - for (final ThrowableProxy proxy : suppressed) { - sb.append(proxy.getExtendedStackTraceAsString()); - } - return sb.toString(); - } - - /** - * The throwable or null if this object is deserialized from XML or JSON. - * - * @return The throwable or null if this object is deserialized from XML or JSON. - */ - public Throwable getThrowable() { - return this.throwable; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (this.causeProxy == null ? 0 : this.causeProxy.hashCode()); - result = prime * result + this.commonElementCount; - result = prime * result + (this.extendedStackTrace == null ? 0 : Arrays.hashCode(this.extendedStackTrace)); - result = prime * result + (this.suppressedProxies == null ? 0 : Arrays.hashCode(this.suppressedProxies)); - result = prime * result + (this.name == null ? 0 : this.name.hashCode()); - return result; - } - - private boolean ignoreElement(final StackTraceElement element, final List ignorePackages) { - final String className = element.getClassName(); - for (final String pkg : ignorePackages) { - if (className.startsWith(pkg)) { - return true; - } - } - return false; - } - - /** - * Loads classes not located via Reflection.getCallerClass. - * - * @param lastLoader - * The ClassLoader that loaded the Class that called this Class. - * @param className - * The name of the Class. - * @return The Class object for the Class or null if it could not be located. - */ - private Class loadClass(final ClassLoader lastLoader, final String className) { - // XXX: this is overly complicated - Class clazz; - if (lastLoader != null) { - try { - clazz = Loader.initializeClass(className, lastLoader); - if (clazz != null) { - return clazz; - } - } catch (final Throwable ignore) { - // Ignore exception. 
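(Review note: loadClass here, together with initializeClass below it, is a three-stage fallback: the loader that resolved the previous frame, then log4j's Loader utility, then this class's own loader. The same shape in plain ClassLoader terms, as an illustration only and not using log4j's helpers:)

final class LoaderFallbackSketch {
    // Try the most specific loader first and treat every lookup failure as
    // soft; an unresolvable class just means "?" location/version later.
    static Class<?> tryLoad(String className, ClassLoader preferred) {
        ClassLoader[] candidates = {
            preferred,
            Thread.currentThread().getContextClassLoader(),
            LoaderFallbackSketch.class.getClassLoader()
        };
        for (ClassLoader loader : candidates) {
            if (loader == null) {
                continue;
            }
            try {
                return Class.forName(className, false, loader);
            } catch (ClassNotFoundException | LinkageError ignored) {
                // fall through to the next candidate
            }
        }
        return null;
    }
}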
- } - } - try { - clazz = Loader.loadClass(className); - } catch (final ClassNotFoundException ignored) { - return initializeClass(className); - } catch (final NoClassDefFoundError ignored) { - return initializeClass(className); - } catch (final SecurityException ignored) { - return initializeClass(className); - } - return clazz; - } - - private Class initializeClass(final String className) { - try { - return Loader.initializeClass(className, this.getClass().getClassLoader()); - } catch (final ClassNotFoundException ignore) { - return null; - } catch (final NoClassDefFoundError ignore) { - return null; - } catch (final SecurityException ignore) { - return null; - } - } - - /** - * Construct the CacheEntry from the Class's information. - * - * @param stackTraceElement - * The stack trace element - * @param callerClass - * The Class. - * @param exact - * True if the class was obtained via Reflection.getCallerClass. - * - * @return The CacheEntry. - */ - private CacheEntry toCacheEntry(final StackTraceElement stackTraceElement, final Class callerClass, - final boolean exact) { - String location = "?"; - String version = "?"; - ClassLoader lastLoader = null; - if (callerClass != null) { - try { - final CodeSource source = callerClass.getProtectionDomain().getCodeSource(); - if (source != null) { - final URL locationURL = source.getLocation(); - if (locationURL != null) { - final String str = locationURL.toString().replace('\\', '/'); - int index = str.lastIndexOf("/"); - if (index >= 0 && index == str.length() - 1) { - index = str.lastIndexOf("/", index - 1); - location = str.substring(index + 1); - } else { - location = str.substring(index + 1); - } - } - } - } catch (final Exception ex) { - // Ignore the exception. - } - final Package pkg = callerClass.getPackage(); - if (pkg != null) { - final String ver = pkg.getImplementationVersion(); - if (ver != null) { - version = ver; - } - } - lastLoader = callerClass.getClassLoader(); - } - return new CacheEntry(new ExtendedClassInfo(exact, location, version), lastLoader); - } - - /** - * Resolve all the stack entries in this stack trace that are not common with the parent. - * - * @param stack - * The callers Class stack. - * @param map - * The cache of CacheEntry objects. - * @param rootTrace - * The first stack trace resolve or null. - * @param stackTrace - * The stack trace being resolved. - * @return The StackTracePackageElement array. - */ - ExtendedStackTraceElement[] toExtendedStackTrace(final Stack> stack, final Map map, - final StackTraceElement[] rootTrace, final StackTraceElement[] stackTrace) { - int stackLength; - if (rootTrace != null) { - int rootIndex = rootTrace.length - 1; - int stackIndex = stackTrace.length - 1; - while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) { - --rootIndex; - --stackIndex; - } - this.commonElementCount = stackTrace.length - 1 - stackIndex; - stackLength = stackIndex + 1; - } else { - this.commonElementCount = 0; - stackLength = stackTrace.length; - } - final ExtendedStackTraceElement[] extStackTrace = new ExtendedStackTraceElement[stackLength]; - Class clazz = stack.isEmpty() ? null : stack.peek(); - ClassLoader lastLoader = null; - for (int i = stackLength - 1; i >= 0; --i) { - final StackTraceElement stackTraceElement = stackTrace[i]; - final String className = stackTraceElement.getClassName(); - // The stack returned from getCurrentStack may be missing entries for java.lang.reflect.Method.invoke() - // and its implementation. 
The Throwable might also contain stack entries that are no longer - // present as those methods have returned. - ExtendedClassInfo extClassInfo; - if (clazz != null && className.equals(clazz.getName())) { - final CacheEntry entry = this.toCacheEntry(stackTraceElement, clazz, true); - extClassInfo = entry.element; - lastLoader = entry.loader; - stack.pop(); - clazz = stack.isEmpty() ? null : stack.peek(); - } else { - final CacheEntry cacheEntry = map.get(className); - if (cacheEntry != null) { - final CacheEntry entry = cacheEntry; - extClassInfo = entry.element; - if (entry.loader != null) { - lastLoader = entry.loader; - } - } else { - final CacheEntry entry = this.toCacheEntry(stackTraceElement, - this.loadClass(lastLoader, className), false); - extClassInfo = entry.element; - map.put(stackTraceElement.toString(), entry); - if (entry.loader != null) { - lastLoader = entry.loader; - } - } - } - extStackTrace[i] = new ExtendedStackTraceElement(stackTraceElement, extClassInfo); - } - return extStackTrace; - } - - @Override - public String toString() { - final String msg = this.message; - return msg != null ? this.name + ": " + msg : this.name; - } - - private ThrowableProxy[] toSuppressedProxies(final Throwable thrown, Set suppressedVisited) { - try { - final Throwable[] suppressed = thrown.getSuppressed(); - if (suppressed == null) { - return EMPTY_THROWABLE_PROXY_ARRAY; - } - final List proxies = new ArrayList<>(suppressed.length); - if (suppressedVisited == null) { - suppressedVisited = new HashSet<>(proxies.size()); - } - for (int i = 0; i < suppressed.length; i++) { - final Throwable candidate = suppressed[i]; - if (!suppressedVisited.contains(candidate)) { - suppressedVisited.add(candidate); - proxies.add(new ThrowableProxy(candidate, suppressedVisited)); - } - } - return proxies.toArray(new ThrowableProxy[proxies.size()]); - } catch (final Exception e) { - StatusLogger.getLogger().error(e); - } - return null; - } -} diff --git a/core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java b/core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java deleted file mode 100644 index 8edf959a4fa..00000000000 --- a/core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache license, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the license for the specific language governing permissions and - * limitations under the license. 
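(Review note: the commonElementCount bookkeeping in toExtendedStackTrace above is the classic shared-tail computation behind the "... N more" line in printed stack traces. Isolated from the surrounding method, it reads as:)

final class CommonFramesSketch {
    // Walk both traces from the bottom (outermost frames) and count how many
    // trailing elements they share; those frames are elided when printing.
    static int commonElementCount(StackTraceElement[] rootTrace, StackTraceElement[] stackTrace) {
        int rootIndex = rootTrace.length - 1;
        int stackIndex = stackTrace.length - 1;
        while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) {
            --rootIndex;
            --stackIndex;
        }
        return stackTrace.length - 1 - stackIndex;
    }
}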
- */ -package org.apache.logging.log4j.core.jmx; - -import java.lang.management.ManagementFactory; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -import javax.management.InstanceAlreadyExistsException; -import javax.management.MBeanRegistrationException; -import javax.management.MBeanServer; -import javax.management.NotCompliantMBeanException; -import javax.management.ObjectName; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.Appender; -import org.apache.logging.log4j.core.LoggerContext; -import org.apache.logging.log4j.core.appender.AsyncAppender; -import org.apache.logging.log4j.core.async.AsyncLoggerConfig; -import org.apache.logging.log4j.core.async.AsyncLoggerContext; -import org.apache.logging.log4j.core.async.DaemonThreadFactory; -import org.apache.logging.log4j.core.config.LoggerConfig; -import org.apache.logging.log4j.core.impl.Log4jContextFactory; -import org.apache.logging.log4j.core.selector.ContextSelector; -import org.apache.logging.log4j.core.util.Constants; -import org.apache.logging.log4j.spi.LoggerContextFactory; -import org.apache.logging.log4j.status.StatusLogger; -import org.apache.logging.log4j.util.PropertiesUtil; -import org.elasticsearch.common.SuppressForbidden; - -/** - * Creates MBeans to instrument various classes in the log4j class hierarchy. - *

- * All instrumentation for Log4j 2 classes can be disabled by setting system property {@code -Dlog4j2.disable.jmx=true}. - *
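Every registration entry point in the class deleted below is gated on that system property. A hedged approximation of the guard, using plain System.getProperty in place of Log4j's PropertiesUtil (the property name is taken from the code below):

public class JmxGuardSketch {
    private static final String PROPERTY_DISABLE_JMX = "log4j2.disable.jmx";

    // Stand-in for PropertiesUtil.getProperties().getBooleanProperty(...).
    static boolean isJmxDisabled() {
        return Boolean.parseBoolean(System.getProperty(PROPERTY_DISABLE_JMX));
    }

    public static void main(String[] args) {
        System.setProperty(PROPERTY_DISABLE_JMX, "true");
        System.out.println("JMX disabled: " + isJmxDisabled()); // prints true
    }
}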

- */ -@SuppressForbidden(reason = "copied class to hack around Log4j bug") -public final class Server { - - /** - * The domain part, or prefix ({@value}) of the {@code ObjectName} of all MBeans that instrument Log4J2 components. - */ - public static final String DOMAIN = "org.apache.logging.log4j2"; - private static final String PROPERTY_DISABLE_JMX = "log4j2.disable.jmx"; - private static final String PROPERTY_ASYNC_NOTIF = "log4j2.jmx.notify.async"; - private static final String THREAD_NAME_PREFIX = "log4j2.jmx.notif"; - private static final StatusLogger LOGGER = StatusLogger.getLogger(); - static final Executor executor = isJmxDisabled() ? null : createExecutor(); - - private Server() { - } - - /** - * Returns either a {@code null} Executor (causing JMX notifications to be sent from the caller thread) or a daemon - * background thread Executor, depending on the value of system property "log4j2.jmx.notify.async". If this - * property is not set, use a {@code null} Executor for web apps to avoid memory leaks and other issues when the - * web app is restarted. - * @see LOG4J2-938 - */ - private static ExecutorService createExecutor() { - final boolean defaultAsync = !Constants.IS_WEB_APP; - final boolean async = PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_ASYNC_NOTIF, defaultAsync); - return async ? Executors.newFixedThreadPool(1, new DaemonThreadFactory(THREAD_NAME_PREFIX)) : null; - } - - /** - * Either returns the specified name as is, or returns a quoted value containing the specified name with the special - * characters (comma, equals, colon, quote, asterisk, or question mark) preceded with a backslash. - * - * @param name the name to escape so it can be used as a value in an {@link ObjectName}. - * @return the escaped name - */ - public static String escape(final String name) { - final StringBuilder sb = new StringBuilder(name.length() * 2); - boolean needsQuotes = false; - for (int i = 0; i < name.length(); i++) { - final char c = name.charAt(i); - switch (c) { - case '\\': - case '*': - case '?': - case '\"': - // quote, star, question & backslash must be escaped - sb.append('\\'); - needsQuotes = true; // ... and can only appear in quoted value - break; - case ',': - case '=': - case ':': - // no need to escape these, but value must be quoted - needsQuotes = true; - break; - case '\r': - // drop \r characters: \\r gives "invalid escape sequence" - continue; - case '\n': - // replace \n characters with \\n sequence - sb.append("\\n"); - needsQuotes = true; - continue; - } - sb.append(c); - } - if (needsQuotes) { - sb.insert(0, '\"'); - sb.append('\"'); - } - return sb.toString(); - } - - private static boolean isJmxDisabled() { - return PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_DISABLE_JMX); - } - - public static void reregisterMBeansAfterReconfigure() { - // avoid creating Platform MBean Server if JMX disabled - if (isJmxDisabled()) { - LOGGER.debug("JMX disabled for log4j2. Not registering MBeans."); - return; - } - final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - reregisterMBeansAfterReconfigure(mbs); - } - - public static void reregisterMBeansAfterReconfigure(final MBeanServer mbs) { - if (isJmxDisabled()) { - LOGGER.debug("JMX disabled for log4j2. 
Not registering MBeans."); - return; - } - - // now provide instrumentation for the newly configured - // LoggerConfigs and Appenders - try { - final ContextSelector selector = getContextSelector(); - if (selector == null) { - LOGGER.debug("Could not register MBeans: no ContextSelector found."); - return; - } - LOGGER.trace("Reregistering MBeans after reconfigure. Selector={}", selector); - final List contexts = selector.getLoggerContexts(); - int i = 0; - for (final LoggerContext ctx : contexts) { - LOGGER.trace("Reregistering context ({}/{}): '{}' {}", ++i, contexts.size(), ctx.getName(), ctx); - // first unregister the context and all nested loggers, - // appenders, statusLogger, contextSelector, ringbuffers... - unregisterLoggerContext(ctx.getName(), mbs); - - final LoggerContextAdmin mbean = new LoggerContextAdmin(ctx, executor); - register(mbs, mbean, mbean.getObjectName()); - - if (ctx instanceof AsyncLoggerContext) { - final RingBufferAdmin rbmbean = ((AsyncLoggerContext) ctx).createRingBufferAdmin(); - if (rbmbean.getBufferSize() > 0) { - // don't register if Disruptor not started (DefaultConfiguration: config not found) - register(mbs, rbmbean, rbmbean.getObjectName()); - } - } - - // register the status logger and the context selector - // repeatedly - // for each known context: if one context is unregistered, - // these MBeans should still be available for the other - // contexts. - registerStatusLogger(ctx.getName(), mbs, executor); - registerContextSelector(ctx.getName(), selector, mbs, executor); - - registerLoggerConfigs(ctx, mbs, executor); - registerAppenders(ctx, mbs, executor); - } - } catch (final Exception ex) { - LOGGER.error("Could not register mbeans", ex); - } - } - - /** - * Unregister all log4j MBeans from the platform MBean server. - */ - public static void unregisterMBeans() { - if (isJmxDisabled()) { - LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans."); - return; - } - final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - unregisterMBeans(mbs); - } - - /** - * Unregister all log4j MBeans from the specified MBean server. - * - * @param mbs the MBean server to unregister from. - */ - public static void unregisterMBeans(final MBeanServer mbs) { - unregisterStatusLogger("*", mbs); - unregisterContextSelector("*", mbs); - unregisterContexts(mbs); - unregisterLoggerConfigs("*", mbs); - unregisterAsyncLoggerRingBufferAdmins("*", mbs); - unregisterAsyncLoggerConfigRingBufferAdmins("*", mbs); - unregisterAppenders("*", mbs); - unregisterAsyncAppenders("*", mbs); - } - - /** - * Returns the {@code ContextSelector} of the current {@code Log4jContextFactory}. - * - * @return the {@code ContextSelector} of the current {@code Log4jContextFactory} - */ - private static ContextSelector getContextSelector() { - final LoggerContextFactory factory = LogManager.getFactory(); - if (factory instanceof Log4jContextFactory) { - final ContextSelector selector = ((Log4jContextFactory) factory).getSelector(); - return selector; - } - return null; - } - - /** - * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s - * and {@code Appender}s from the platform MBean server. - * - * @param loggerContextName name of the logger context to unregister - */ - public static void unregisterLoggerContext(final String loggerContextName) { - if (isJmxDisabled()) { - LOGGER.debug("JMX disabled for Log4j2. 
Not unregistering MBeans."); - return; - } - final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - unregisterLoggerContext(loggerContextName, mbs); - } - - /** - * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s - * and {@code Appender}s from the platform MBean server. - * - * @param contextName name of the logger context to unregister - * @param mbs the MBean Server to unregister the instrumented objects from - */ - public static void unregisterLoggerContext(final String contextName, final MBeanServer mbs) { - final String pattern = LoggerContextAdminMBean.PATTERN; - final String search = String.format(pattern, escape(contextName), "*"); - unregisterAllMatching(search, mbs); // unregister context mbean - - // now unregister all MBeans associated with this logger context - unregisterStatusLogger(contextName, mbs); - unregisterContextSelector(contextName, mbs); - unregisterLoggerConfigs(contextName, mbs); - unregisterAppenders(contextName, mbs); - unregisterAsyncAppenders(contextName, mbs); - unregisterAsyncLoggerRingBufferAdmins(contextName, mbs); - unregisterAsyncLoggerConfigRingBufferAdmins(contextName, mbs); - } - - private static void registerStatusLogger(final String contextName, final MBeanServer mbs, final Executor executor) - throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException { - - final StatusLoggerAdmin mbean = new StatusLoggerAdmin(contextName, executor); - register(mbs, mbean, mbean.getObjectName()); - } - - private static void registerContextSelector(final String contextName, final ContextSelector selector, - final MBeanServer mbs, final Executor executor) throws InstanceAlreadyExistsException, - MBeanRegistrationException, NotCompliantMBeanException { - - final ContextSelectorAdmin mbean = new ContextSelectorAdmin(contextName, selector); - register(mbs, mbean, mbean.getObjectName()); - } - - private static void unregisterStatusLogger(final String contextName, final MBeanServer mbs) { - final String pattern = StatusLoggerAdminMBean.PATTERN; - final String search = String.format(pattern, escape(contextName), "*"); - unregisterAllMatching(search, mbs); - } - - private static void unregisterContextSelector(final String contextName, final MBeanServer mbs) { - final String pattern = ContextSelectorAdminMBean.PATTERN; - final String search = String.format(pattern, escape(contextName), "*"); - unregisterAllMatching(search, mbs); - } - - private static void unregisterLoggerConfigs(final String contextName, final MBeanServer mbs) { - final String pattern = LoggerConfigAdminMBean.PATTERN; - final String search = String.format(pattern, escape(contextName), "*"); - unregisterAllMatching(search, mbs); - } - - private static void unregisterContexts(final MBeanServer mbs) { - final String pattern = LoggerContextAdminMBean.PATTERN; - final String search = String.format(pattern, "*"); - unregisterAllMatching(search, mbs); - } - - private static void unregisterAppenders(final String contextName, final MBeanServer mbs) { - final String pattern = AppenderAdminMBean.PATTERN; - final String search = String.format(pattern, escape(contextName), "*"); - unregisterAllMatching(search, mbs); - } - - private static void unregisterAsyncAppenders(final String contextName, final MBeanServer mbs) { - final String pattern = AsyncAppenderAdminMBean.PATTERN; - final String search = String.format(pattern, escape(contextName), "*"); - unregisterAllMatching(search, mbs); - } - - private static 
void unregisterAsyncLoggerRingBufferAdmins(final String contextName, final MBeanServer mbs) { - final String pattern1 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER; - final String search1 = String.format(pattern1, escape(contextName)); - unregisterAllMatching(search1, mbs); - } - - private static void unregisterAsyncLoggerConfigRingBufferAdmins(final String contextName, final MBeanServer mbs) { - final String pattern2 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER_CONFIG; - final String search2 = String.format(pattern2, escape(contextName), "*"); - unregisterAllMatching(search2, mbs); - } - - private static void unregisterAllMatching(final String search, final MBeanServer mbs) { - try { - final ObjectName pattern = new ObjectName(search); - final Set found = mbs.queryNames(pattern, null); - if (found.isEmpty()) { - LOGGER.trace("Unregistering but no MBeans found matching '{}'", search); - } else { - LOGGER.trace("Unregistering {} MBeans: {}", found.size(), found); - } - for (final ObjectName objectName : found) { - mbs.unregisterMBean(objectName); - } - } catch (final Exception ex) { - LOGGER.error("Could not unregister MBeans for " + search, ex); - } - } - - private static void registerLoggerConfigs(final LoggerContext ctx, final MBeanServer mbs, final Executor executor) - throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException { - - final Map map = ctx.getConfiguration().getLoggers(); - for (final String name : map.keySet()) { - final LoggerConfig cfg = map.get(name); - final LoggerConfigAdmin mbean = new LoggerConfigAdmin(ctx, cfg); - register(mbs, mbean, mbean.getObjectName()); - - if (cfg instanceof AsyncLoggerConfig) { - final AsyncLoggerConfig async = (AsyncLoggerConfig) cfg; - final RingBufferAdmin rbmbean = async.createRingBufferAdmin(ctx.getName()); - register(mbs, rbmbean, rbmbean.getObjectName()); - } - } - } - - private static void registerAppenders(final LoggerContext ctx, final MBeanServer mbs, final Executor executor) - throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException { - - final Map map = ctx.getConfiguration().getAppenders(); - for (final String name : map.keySet()) { - final Appender appender = map.get(name); - - if (appender instanceof AsyncAppender) { - final AsyncAppender async = ((AsyncAppender) appender); - final AsyncAppenderAdmin mbean = new AsyncAppenderAdmin(ctx.getName(), async); - register(mbs, mbean, mbean.getObjectName()); - } else { - final AppenderAdmin mbean = new AppenderAdmin(ctx.getName(), appender); - register(mbs, mbean, mbean.getObjectName()); - } - } - } - - private static void register(final MBeanServer mbs, final Object mbean, final ObjectName objectName) - throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException { - LOGGER.debug("Registering MBean {}", objectName); - mbs.registerMBean(mbean, objectName); - } -} diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java b/core/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java index cb4bee30aaa..7c3e8652c07 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java @@ -24,9 +24,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.QueryShardContext; -/** - * - */ public class 
ExistsFieldQueryExtension implements FieldQueryExtension { public static final String NAME = "_exists_"; diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java b/core/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java index 299a37a1550..6d7e6dc6eca 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/FieldQueryExtension.java @@ -22,9 +22,6 @@ package org.apache.lucene.queryparser.classic; import org.apache.lucene.search.Query; import org.elasticsearch.index.query.QueryShardContext; -/** - * - */ public interface FieldQueryExtension { Query query(QueryShardContext context, String queryText); diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index bcf0a2b201a..ac9770f2bc8 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.index.Term; +import org.apache.lucene.queryparser.analyzing.AnalyzingQueryParser; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -34,6 +35,7 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.lucene.search.Queries; @@ -42,6 +44,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.LegacyDateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; @@ -63,7 +66,7 @@ import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfN * Also breaks fields with [type].[name] into a boolean query that must include the type * as well as the query on the name. 
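The MapperQueryParser hunks below drop Locale-based lowercasing of expanded terms (setLowercaseExpandedTerms, toLowerCase(locale)) in favor of per-field analyzer normalization via getAnalyzer().normalize(field, text). A minimal sketch of that Lucene 6.x idiom, assuming lucene-core on the classpath and a lowercasing analysis chain such as StandardAnalyzer:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.BytesRef;

public class NormalizeSketch {
    public static void main(String[] args) {
        // Normalize a raw query fragment the way the field's analysis chain
        // would at index time, instead of calling String.toLowerCase(locale).
        try (Analyzer analyzer = new StandardAnalyzer()) {
            BytesRef normalized = analyzer.normalize("title", "FooBar");
            System.out.println(normalized.utf8ToString()); // expected: foobar
        }
    }
}

The upshot is that range, fuzzy, prefix, and regexp terms are transformed by the same chain that produced the indexed terms, rather than by a JVM Locale that may disagree with it.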
*/ -public class MapperQueryParser extends QueryParser { +public class MapperQueryParser extends AnalyzingQueryParser { public static final Map FIELD_QUERY_EXTENSIONS; @@ -99,11 +102,11 @@ public class MapperQueryParser extends QueryParser { setAutoGeneratePhraseQueries(settings.autoGeneratePhraseQueries()); setMaxDeterminizedStates(settings.maxDeterminizedStates()); setAllowLeadingWildcard(settings.allowLeadingWildcard()); - setLowercaseExpandedTerms(settings.lowercaseExpandedTerms()); + setLowercaseExpandedTerms(false); setPhraseSlop(settings.phraseSlop()); setDefaultOperator(settings.defaultOperator()); setFuzzyPrefixLength(settings.fuzzyPrefixLength()); - setLocale(settings.locale()); + setSplitOnWhitespace(settings.splitOnWhitespace()); } /** @@ -180,17 +183,17 @@ public class MapperQueryParser extends QueryParser { if (queryText.charAt(0) == '>') { if (queryText.length() > 2) { if (queryText.charAt(1) == '=') { - return getRangeQuerySingle(field, queryText.substring(2), null, true, true); + return getRangeQuerySingle(field, queryText.substring(2), null, true, true, context); } } - return getRangeQuerySingle(field, queryText.substring(1), null, false, true); + return getRangeQuerySingle(field, queryText.substring(1), null, false, true, context); } else if (queryText.charAt(0) == '<') { if (queryText.length() > 2) { if (queryText.charAt(1) == '=') { - return getRangeQuerySingle(field, null, queryText.substring(2), true, true); + return getRangeQuerySingle(field, null, queryText.substring(2), true, true, context); } } - return getRangeQuerySingle(field, null, queryText.substring(1), true, false); + return getRangeQuerySingle(field, null, queryText.substring(1), true, false, context); } } currentFieldType = null; @@ -290,19 +293,19 @@ public class MapperQueryParser extends QueryParser { Collection fields = extractMultiFields(field); if (fields == null) { - return getRangeQuerySingle(field, part1, part2, startInclusive, endInclusive); + return getRangeQuerySingle(field, part1, part2, startInclusive, endInclusive, context); } if (fields.size() == 1) { - return getRangeQuerySingle(fields.iterator().next(), part1, part2, startInclusive, endInclusive); + return getRangeQuerySingle(fields.iterator().next(), part1, part2, startInclusive, endInclusive, context); } if (settings.useDisMax()) { List queries = new ArrayList<>(); boolean added = false; for (String mField : fields) { - Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive); + Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context); if (q != null) { added = true; queries.add(applyBoost(mField, q)); @@ -315,7 +318,7 @@ public class MapperQueryParser extends QueryParser { } else { List clauses = new ArrayList<>(); for (String mField : fields) { - Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive); + Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context); if (q != null) { clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); } @@ -326,24 +329,23 @@ public class MapperQueryParser extends QueryParser { } private Query getRangeQuerySingle(String field, String part1, String part2, - boolean startInclusive, boolean endInclusive) { + boolean startInclusive, boolean endInclusive, QueryShardContext context) { currentFieldType = context.fieldMapper(field); if (currentFieldType != null) { - if (lowercaseExpandedTerms && currentFieldType.tokenized()) { - part1 = part1 == null ? 
null : part1.toLowerCase(locale); - part2 = part2 == null ? null : part2.toLowerCase(locale); - } - try { + BytesRef part1Binary = part1 == null ? null : getAnalyzer().normalize(field, part1); + BytesRef part2Binary = part2 == null ? null : getAnalyzer().normalize(field, part2); Query rangeQuery; if (currentFieldType instanceof LegacyDateFieldMapper.DateFieldType && settings.timeZone() != null) { LegacyDateFieldMapper.DateFieldType dateFieldType = (LegacyDateFieldMapper.DateFieldType) this.currentFieldType; - rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null); + rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary, + startInclusive, endInclusive, settings.timeZone(), null, context); } else if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) { DateFieldMapper.DateFieldType dateFieldType = (DateFieldMapper.DateFieldType) this.currentFieldType; - rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null); + rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary, + startInclusive, endInclusive, settings.timeZone(), null, context); } else { - rangeQuery = currentFieldType.rangeQuery(part1, part2, startInclusive, endInclusive); + rangeQuery = currentFieldType.rangeQuery(part1Binary, part2Binary, startInclusive, endInclusive, context); } return rangeQuery; } catch (RuntimeException e) { @@ -357,9 +359,6 @@ public class MapperQueryParser extends QueryParser { } protected Query getFuzzyQuery(String field, String termStr, String minSimilarity) throws ParseException { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -398,8 +397,9 @@ public class MapperQueryParser extends QueryParser { currentFieldType = context.fieldMapper(field); if (currentFieldType != null) { try { - return currentFieldType.fuzzyQuery(termStr, Fuzziness.build(minSimilarity), - fuzzyPrefixLength, settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions); + BytesRef term = termStr == null ? 
null : getAnalyzer().normalize(field, termStr); + return currentFieldType.fuzzyQuery(term, Fuzziness.build(minSimilarity), + getFuzzyPrefixLength(), settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions); } catch (RuntimeException e) { if (settings.lenient()) { return null; @@ -422,9 +422,6 @@ public class MapperQueryParser extends QueryParser { @Override protected Query getPrefixQuery(String field, String termStr) throws ParseException { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -470,8 +467,8 @@ public class MapperQueryParser extends QueryParser { setAnalyzer(context.getSearchAnalyzer(currentFieldType)); } Query query = null; - if (currentFieldType.tokenized() == false) { - query = currentFieldType.prefixQuery(termStr, multiTermRewriteMethod, context); + if (currentFieldType instanceof StringFieldType == false) { + query = currentFieldType.prefixQuery(termStr, getMultiTermRewriteMethod(), context); } if (query == null) { query = getPossiblyAnalyzedPrefixQuery(currentFieldType.name(), termStr); @@ -589,9 +586,6 @@ public class MapperQueryParser extends QueryParser { return FIELD_QUERY_EXTENSIONS.get(ExistsFieldQueryExtension.NAME).query(context, actualField); } } - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -638,9 +632,8 @@ public class MapperQueryParser extends QueryParser { setAnalyzer(context.getSearchAnalyzer(currentFieldType)); } indexedNameField = currentFieldType.name(); - return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr); } - return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr); + return super.getWildcardQuery(indexedNameField, termStr); } catch (RuntimeException e) { if (settings.lenient()) { return null; @@ -651,75 +644,8 @@ public class MapperQueryParser extends QueryParser { } } - private Query getPossiblyAnalyzedWildcardQuery(String field, String termStr) throws ParseException { - if (!settings.analyzeWildcard()) { - return super.getWildcardQuery(field, termStr); - } - boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*")); - StringBuilder aggStr = new StringBuilder(); - StringBuilder tmp = new StringBuilder(); - for (int i = 0; i < termStr.length(); i++) { - char c = termStr.charAt(i); - if (c == '?' 
|| c == '*') { - if (isWithinToken) { - try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) { - source.reset(); - CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); - if (source.incrementToken()) { - String term = termAtt.toString(); - if (term.length() == 0) { - // no tokens, just use what we have now - aggStr.append(tmp); - } else { - aggStr.append(term); - } - } else { - // no tokens, just use what we have now - aggStr.append(tmp); - } - } catch (IOException e) { - aggStr.append(tmp); - } - tmp.setLength(0); - } - isWithinToken = false; - aggStr.append(c); - } else { - tmp.append(c); - isWithinToken = true; - } - } - if (isWithinToken) { - try { - try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) { - source.reset(); - CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); - if (source.incrementToken()) { - String term = termAtt.toString(); - if (term.length() == 0) { - // no tokens, just use what we have now - aggStr.append(tmp); - } else { - aggStr.append(term); - } - } else { - // no tokens, just use what we have now - aggStr.append(tmp); - } - } - } catch (IOException e) { - aggStr.append(tmp); - } - } - - return super.getWildcardQuery(field, aggStr.toString()); - } - @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -767,7 +693,7 @@ public class MapperQueryParser extends QueryParser { Query query = null; if (currentFieldType.tokenized() == false) { query = currentFieldType.regexpQuery(termStr, RegExp.ALL, - maxDeterminizedStates, multiTermRewriteMethod, context); + getMaxDeterminizedStates(), getMultiTermRewriteMethod(), context); } if (query == null) { query = super.getRegexpQuery(field, termStr); diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java b/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java index c1fc2ae556e..295c1ace4f6 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.MultiTermQuery; import org.elasticsearch.common.unit.Fuzziness; import org.joda.time.DateTimeZone; -import java.util.Locale; import java.util.Map; /** @@ -53,12 +52,8 @@ public class QueryParserSettings { private boolean analyzeWildcard; - private boolean lowercaseExpandedTerms; - private boolean enablePositionIncrements; - private Locale locale; - private Fuzziness fuzziness; private int fuzzyPrefixLength; private int fuzzyMaxExpansions; @@ -79,6 +74,8 @@ public class QueryParserSettings { /** To limit effort spent determinizing regexp queries. 
*/ private int maxDeterminizedStates; + private boolean splitOnWhitespace; + public QueryParserSettings(String queryString) { this.queryString = queryString; } @@ -135,14 +132,6 @@ public class QueryParserSettings { this.allowLeadingWildcard = allowLeadingWildcard; } - public boolean lowercaseExpandedTerms() { - return lowercaseExpandedTerms; - } - - public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - } - public boolean enablePositionIncrements() { return enablePositionIncrements; } @@ -267,14 +256,6 @@ public class QueryParserSettings { this.useDisMax = useDisMax; } - public void locale(Locale locale) { - this.locale = locale; - } - - public Locale locale() { - return this.locale; - } - public void timeZone(DateTimeZone timeZone) { this.timeZone = timeZone; } @@ -290,4 +271,12 @@ public class QueryParserSettings { public Fuzziness fuzziness() { return fuzziness; } + + public void splitOnWhitespace(boolean value) { + this.splitOnWhitespace = value; + } + + public boolean splitOnWhitespace() { + return splitOnWhitespace; + } } diff --git a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 88b045d2a5e..f58d4b47424 100644 --- a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.apache.lucene.search.spans.SpanTermQuery; @@ -38,9 +39,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import java.io.IOException; import java.util.Collection; -/** - * - */ // LUCENE MONITOR // TODO: remove me! public class CustomFieldQuery extends FieldQuery { @@ -88,6 +86,13 @@ public class CustomFieldQuery extends FieldQuery { flatten(boostingQuery.getMatch(), reader, flatQueries, boost); //flatten negative query with negative boost flatten(boostingQuery.getContext(), reader, flatQueries, boostingQuery.getBoost()); + } else if (sourceQuery instanceof SynonymQuery) { + // SynonymQuery should be handled by the parent class directly. + // This statement should be removed when https://issues.apache.org/jira/browse/LUCENE-7484 is merged. 
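For context on the added branch just below: a SynonymQuery scores several terms as if they were one, and getTerms() exposes them so the highlighter can decompose the query into per-term TermQuery objects. A standalone illustration against the Lucene 6.x API:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;

public class SynonymFlattenSketch {
    public static void main(String[] args) {
        SynonymQuery synonyms = new SynonymQuery(
                new Term("body", "fast"), new Term("body", "quick"));
        // Decompose into one TermQuery per term, as the highlighter hunk does.
        for (Term term : synonyms.getTerms()) {
            System.out.println(new TermQuery(term));
        }
    }
}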
+ SynonymQuery synQuery = (SynonymQuery) sourceQuery; + for (Term term : synQuery.getTerms()) { + flatten(new TermQuery(term), reader, flatQueries, boost); + } } else { super.flatten(sourceQuery, reader, flatQueries, boost); } diff --git a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java index 3f5e10cb895..8e25d812e92 100644 --- a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java +++ b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java @@ -22,8 +22,6 @@ import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.ByteSizeValue; -/** - */ public class StoreRateLimiting { public interface Provider { diff --git a/core/src/main/java/org/apache/lucene/store/StoreUtils.java b/core/src/main/java/org/apache/lucene/store/StoreUtils.java index b7de08b1ec1..c26bbbac353 100644 --- a/core/src/main/java/org/apache/lucene/store/StoreUtils.java +++ b/core/src/main/java/org/apache/lucene/store/StoreUtils.java @@ -20,8 +20,6 @@ package org.apache.lucene.store; import java.util.Arrays; -/** - */ public final class StoreUtils { private StoreUtils() { diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 63161a0a187..eb33dbe4b18 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -19,7 +19,6 @@ package org.elasticsearch; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.common.io.stream.StreamInput; @@ -488,7 +487,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, org.elasticsearch.snapshots.SnapshotCreationException::new, 27), DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, - org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28), + org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),// deprecated in 6.0, remove in 7.0 DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.index.engine.DocumentMissingException::new, 29), SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class, @@ -582,7 +581,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class, org.elasticsearch.action.RoutingMissingException::new, 79), INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, - org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), + org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), // deprecated in 6.0, remove in 7.0 INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81), REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class, @@ -693,7 +692,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte 
ShardStateAction.NoLongerPrimaryShardException::new, 142), SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143), NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144), - STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145); + STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145), + TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class, + org.elasticsearch.tasks.TaskCancelledException::new, 146); final Class exceptionClass; final FunctionThatThrowsIOException constructor; diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java b/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java index 1358ef54d9d..cdccdb8da95 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchParseException.java @@ -24,9 +24,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class ElasticsearchParseException extends ElasticsearchException { public ElasticsearchParseException(String msg, Object... args) { diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java b/core/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java index 79e501b537a..0b809e0923b 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java @@ -19,9 +19,6 @@ package org.elasticsearch; -/** - * - */ public interface ElasticsearchWrapperException { Throwable getCause(); diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 83f706664a5..e9e950ce80a 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -29,8 +29,6 @@ import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.IOException; -/** - */ public class Version { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA @@ -75,6 +73,8 @@ public class Version { public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); public static final int V_2_4_0_ID = 2040099; public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); + public static final int V_2_4_1_ID = 2040199; + public static final Version V_2_4_1 = new Version(V_2_4_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); public static final int V_5_0_0_alpha1_ID = 5000001; public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_alpha2_ID = 5000002; @@ -85,12 +85,25 @@ public class Version { public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); public static final int V_5_0_0_alpha5_ID = 5000005; public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); - public static final int V_5_0_0_alpha6_ID = 5000006; - public static final Version V_5_0_0_alpha6 = new Version(V_5_0_0_alpha6_ID, 
org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final int V_5_0_0_beta1_ID = 5000026; + public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final int V_5_0_0_rc1_ID = 5000051; + public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final int V_5_0_0_ID = 5000099; + public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_6_0_0_alpha1_ID = 6000001; - public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); public static final Version CURRENT = V_6_0_0_alpha1; + /* NOTE: don't add unreleased version to this list except of the version assigned to CURRENT. + * If you need a version that doesn't exist here for instance V_5_1_0 then go and create such a version + * as a constant where you need it: + *
+     * <pre>
+     *   public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
+     * </pre>
+ * Then go to VersionsTest.java and add a test for this constant VersionTests#testUnknownVersions(). + * This is particularly useful if you are building a feature that needs a BWC layer for this unreleased version etc.*/ + static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]"; @@ -104,8 +117,12 @@ public class Version { switch (id) { case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; - case V_5_0_0_alpha6_ID: - return V_5_0_0_alpha6; + case V_5_0_0_ID: + return V_5_0_0; + case V_5_0_0_rc1_ID: + return V_5_0_0_rc1; + case V_5_0_0_beta1_ID: + return V_5_0_0_beta1; case V_5_0_0_alpha5_ID: return V_5_0_0_alpha5; case V_5_0_0_alpha4_ID: @@ -116,6 +133,8 @@ public class Version { return V_5_0_0_alpha2; case V_5_0_0_alpha1_ID: return V_5_0_0_alpha1; + case V_2_4_1_ID: + return V_2_4_1; case V_2_4_0_ID: return V_2_4_0; case V_2_3_5_ID: diff --git a/core/src/main/java/org/elasticsearch/action/ActionRequest.java b/core/src/main/java/org/elasticsearch/action/ActionRequest.java index 970afa413cc..e8dd639c4df 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionRequest.java +++ b/core/src/main/java/org/elasticsearch/action/ActionRequest.java @@ -25,9 +25,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public abstract class ActionRequest> extends TransportRequest { public ActionRequest() { diff --git a/core/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index 8cbc405dafb..076d4ae67f6 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -26,9 +26,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Objects; -/** - * - */ public abstract class ActionRequestBuilder> { protected final Action action; diff --git a/core/src/main/java/org/elasticsearch/action/CompositeIndicesRequest.java b/core/src/main/java/org/elasticsearch/action/CompositeIndicesRequest.java index 126874d0c3c..5c88c57b83e 100644 --- a/core/src/main/java/org/elasticsearch/action/CompositeIndicesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/CompositeIndicesRequest.java @@ -22,11 +22,13 @@ package org.elasticsearch.action; import java.util.List; /** - * Needs to be implemented by all {@link org.elasticsearch.action.ActionRequest} subclasses that are composed of - * multiple subrequests which relate to one or more indices. Allows to retrieve those subrequests. + * Needs to be implemented by all {@link org.elasticsearch.action.ActionRequest} subclasses that are composed of multiple subrequests + * which relate to one or more indices. Allows to retrieve those subrequests and reason about them separately. A composite request is + * executed by its own transport action class (e.g. {@link org.elasticsearch.action.search.TransportMultiSearchAction}), which goes + * through all the subrequests and delegates their exection to the appropriate transport action (e.g. + * {@link org.elasticsearch.action.search.TransportSearchAction}) for each single item. 
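A hedged sketch of the contract this javadoc describes, with stub types standing in for the real ActionRequest/IndicesRequest hierarchy (the subRequests() accessor is assumed from the interface shown below):

import java.util.Arrays;
import java.util.List;

public class CompositeRequestSketch {
    interface IndicesRequest { String[] indices(); }
    interface CompositeIndicesRequest { List<? extends IndicesRequest> subRequests(); }

    // Hypothetical per-item request.
    static class SearchSub implements IndicesRequest {
        private final String[] indices;
        SearchSub(String... indices) { this.indices = indices; }
        public String[] indices() { return indices; }
    }

    public static void main(String[] args) {
        // A multi-search-like wrapper: its transport action would iterate the
        // subrequests and delegate each one to the per-item transport action.
        CompositeIndicesRequest multi =
                () -> Arrays.asList(new SearchSub("twitter"), new SearchSub("logs"));
        for (IndicesRequest sub : multi.subRequests()) {
            System.out.println(String.join(",", sub.indices()));
        }
    }
}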
*/ public interface CompositeIndicesRequest { - /** * Returns the subrequests that a composite request is composed of */ diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java new file mode 100644 index 00000000000..09db7089ff6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.VersionType; + +import java.io.IOException; +import java.util.Locale; + +/** + * Generic interface to group ActionRequest, which perform writes to a single document + * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} + */ +public interface DocWriteRequest extends IndicesRequest { + + /** + * Get the index that this request operates on + * @return the index + */ + String index(); + + /** + * Get the type that this request operates on + * @return the type + */ + String type(); + + /** + * Get the id of the document for this request + * @return the id + */ + String id(); + + /** + * Get the options for this request + * @return the indices options + */ + IndicesOptions indicesOptions(); + + /** + * Set the routing for this request + * @return the Request + */ + T routing(String routing); + + /** + * Get the routing for this request + * @return the Routing + */ + String routing(); + + + /** + * Get the parent for this request + * @return the Parent + */ + String parent(); + + /** + * Get the document version for this request + * @return the document version + */ + long version(); + + /** + * Sets the version, which will perform the operation only if a matching + * version exists and no changes happened on the doc since then. + */ + T version(long version); + + /** + * Get the document version type for this request + * @return the document version type + */ + VersionType versionType(); + + /** + * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. + */ + T versionType(VersionType versionType); + + /** + * Get the requested document operation type of the request + * @return the operation type {@link OpType} + */ + OpType opType(); + + /** + * Requested operation type to perform on the document + */ + enum OpType { + /** + * Index the source. If there an existing document with the id, it will + * be replaced. 
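The OpType enum whose constants follow pairs each operation with a stable byte id (for the wire format) and a cached lowercase name (for responses). A round-trip sketch as a standalone re-declaration, not the ES class itself:

import java.util.Locale;

public class OpTypeSketch {
    enum OpType {
        INDEX(0), CREATE(1), UPDATE(2), DELETE(3);

        private final byte op;
        private final String lowercase;

        OpType(int op) {
            this.op = (byte) op;
            this.lowercase = name().toLowerCase(Locale.ROOT);
        }

        byte getId() { return op; }
        String getLowercase() { return lowercase; }

        // Reverse lookup by wire id, failing loudly on unknown ids.
        static OpType fromId(byte id) {
            for (OpType t : values()) {
                if (t.op == id) {
                    return t;
                }
            }
            throw new IllegalArgumentException("Unknown opType: [" + id + "]");
        }
    }

    public static void main(String[] args) {
        OpType t = OpType.fromId((byte) 2);
        System.out.println(t + " / " + t.getLowercase()); // UPDATE / update
    }
}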
+ */ + INDEX(0), + /** + * Creates the resource. Simply adds it to the index, if there is an existing + * document with the id, then it won't be removed. + */ + CREATE(1), + /** Updates a document */ + UPDATE(2), + /** Deletes a document */ + DELETE(3); + + private final byte op; + private final String lowercase; + + OpType(int op) { + this.op = (byte) op; + this.lowercase = this.toString().toLowerCase(Locale.ROOT); + } + + public byte getId() { + return op; + } + + public String getLowercase() { + return lowercase; + } + + public static OpType fromId(byte id) { + switch (id) { + case 0: return INDEX; + case 1: return CREATE; + case 2: return UPDATE; + case 3: return DELETE; + default: throw new IllegalArgumentException("Unknown opType: [" + id + "]"); + } + } + + public static OpType fromString(String sOpType) { + String lowerCase = sOpType.toLowerCase(Locale.ROOT); + for (OpType opType : OpType.values()) { + if (opType.getLowercase().equals(lowerCase)) { + return opType; + } + } + throw new IllegalArgumentException("Unknown opType: [" + sOpType + "]"); + } + } + + /** read a document write (index/delete/update) request */ + static DocWriteRequest readDocumentRequest(StreamInput in) throws IOException { + byte type = in.readByte(); + DocWriteRequest docWriteRequest; + if (type == 0) { + IndexRequest indexRequest = new IndexRequest(); + indexRequest.readFrom(in); + docWriteRequest = indexRequest; + } else if (type == 1) { + DeleteRequest deleteRequest = new DeleteRequest(); + deleteRequest.readFrom(in); + docWriteRequest = deleteRequest; + } else if (type == 2) { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.readFrom(in); + docWriteRequest = updateRequest; + } else { + throw new IllegalStateException("invalid request type [" + type+ " ]"); + } + return docWriteRequest; + } + + /** write a document write (index/delete/update) request*/ + static void writeDocumentRequest(StreamOutput out, DocWriteRequest request) throws IOException { + if (request instanceof IndexRequest) { + out.writeByte((byte) 0); + ((IndexRequest) request).writeTo(out); + } else if (request instanceof DeleteRequest) { + out.writeByte((byte) 1); + ((DeleteRequest) request).writeTo(out); + } else if (request instanceof UpdateRequest) { + out.writeByte((byte) 2); + ((UpdateRequest) request).writeTo(out); + } else { + throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]"); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java b/core/src/main/java/org/elasticsearch/action/DocumentRequest.java deleted file mode 100644 index a90f013a6b9..00000000000 --- a/core/src/main/java/org/elasticsearch/action/DocumentRequest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
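The readDocumentRequest/writeDocumentRequest pair above frames each request with a one-byte type discriminator (0 index, 1 delete, 2 update). A minimal sketch of that framing with plain Java data streams standing in for StreamInput/StreamOutput and strings standing in for the requests:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TaggedFramingSketch {
    static void write(DataOutputStream out, byte type, String payload) throws IOException {
        out.writeByte(type); // discriminator first, then the payload
        out.writeUTF(payload);
    }

    static String read(DataInputStream in) throws IOException {
        byte type = in.readByte();
        switch (type) {
            case 0: return "index:" + in.readUTF();
            case 1: return "delete:" + in.readUTF();
            case 2: return "update:" + in.readUTF();
            default: throw new IllegalStateException("invalid request type [" + type + "]");
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), (byte) 2, "doc-1");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(read(in)); // update:doc-1
    }
}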
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action; - -import org.elasticsearch.action.support.IndicesOptions; - -/** - * Generic interface to group ActionRequest, which work on single document level - * - * Forces this class return index/type/id getters - */ -public interface DocumentRequest extends IndicesRequest { - - /** - * Get the index that this request operates on - * @return the index - */ - String index(); - - /** - * Get the type that this request operates on - * @return the type - */ - String type(); - - /** - * Get the id of the document for this request - * @return the id - */ - String id(); - - /** - * Get the options for this request - * @return the indices options - */ - IndicesOptions indicesOptions(); - - /** - * Set the routing for this request - * @return the Request - */ - T routing(String routing); - - /** - * Get the routing for this request - * @return the Routing - */ - String routing(); - - - /** - * Get the parent for this request - * @return the Parent - */ - String parent(); - -} diff --git a/core/src/main/java/org/elasticsearch/action/FailedNodeException.java b/core/src/main/java/org/elasticsearch/action/FailedNodeException.java index f6e680f2dab..bf9aad0d39e 100644 --- a/core/src/main/java/org/elasticsearch/action/FailedNodeException.java +++ b/core/src/main/java/org/elasticsearch/action/FailedNodeException.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - * - */ public class FailedNodeException extends ElasticsearchException { private final String nodeId; diff --git a/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java index 00562af99c5..af40a7a672e 100644 --- a/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java +++ b/core/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java @@ -26,9 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class NoShardAvailableActionException extends ElasticsearchException { public NoShardAvailableActionException(ShardId shardId) { diff --git a/core/src/main/java/org/elasticsearch/action/NoSuchNodeException.java b/core/src/main/java/org/elasticsearch/action/NoSuchNodeException.java index 3f8ce1441c5..0d800cd99c3 100644 --- a/core/src/main/java/org/elasticsearch/action/NoSuchNodeException.java +++ b/core/src/main/java/org/elasticsearch/action/NoSuchNodeException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class NoSuchNodeException extends FailedNodeException { public NoSuchNodeException(String nodeId) { diff --git a/core/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java b/core/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java index f364bdb9aef..6e9ff68f5e9 100644 --- a/core/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java +++ b/core/src/main/java/org/elasticsearch/action/PrimaryMissingActionException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class PrimaryMissingActionException extends ElasticsearchException { public PrimaryMissingActionException(String message) { diff --git 
a/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java index 86bca96d744..1ecd9593e1c 100644 --- a/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java +++ b/core/src/main/java/org/elasticsearch/action/RoutingMissingException.java @@ -27,9 +27,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.Objects; -/** - * - */ public class RoutingMissingException extends ElasticsearchException { private final String type; diff --git a/core/src/main/java/org/elasticsearch/action/ThreadingModel.java b/core/src/main/java/org/elasticsearch/action/ThreadingModel.java index a4e020d9f08..996ebc9d6cc 100644 --- a/core/src/main/java/org/elasticsearch/action/ThreadingModel.java +++ b/core/src/main/java/org/elasticsearch/action/ThreadingModel.java @@ -20,9 +20,6 @@ package org.elasticsearch.action; -/** - * - */ public enum ThreadingModel { NONE((byte) 0), OPERATION((byte) 1), diff --git a/core/src/main/java/org/elasticsearch/action/TimestampParsingException.java b/core/src/main/java/org/elasticsearch/action/TimestampParsingException.java index 0764bb502c0..c8401969d7e 100644 --- a/core/src/main/java/org/elasticsearch/action/TimestampParsingException.java +++ b/core/src/main/java/org/elasticsearch/action/TimestampParsingException.java @@ -25,8 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - */ public class TimestampParsingException extends ElasticsearchException { private final String timestamp; diff --git a/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java b/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java index e2cff9244ea..ac39bd91f2b 100644 --- a/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java +++ b/core/src/main/java/org/elasticsearch/action/UnavailableShardsException.java @@ -27,9 +27,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class UnavailableShardsException extends ElasticsearchException { public UnavailableShardsException(@Nullable ShardId shardId, String message, Object... args) { diff --git a/core/src/main/java/org/elasticsearch/action/ValidateActions.java b/core/src/main/java/org/elasticsearch/action/ValidateActions.java index d0ade0695fd..d748ba59e01 100644 --- a/core/src/main/java/org/elasticsearch/action/ValidateActions.java +++ b/core/src/main/java/org/elasticsearch/action/ValidateActions.java @@ -19,9 +19,6 @@ package org.elasticsearch.action; -/** - * - */ public class ValidateActions { public static ActionRequestValidationException addValidationError(String error, ActionRequestValidationException validationException) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index a304fa60cb7..5aa35a059fb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -127,7 +127,7 @@ public class TransportClusterAllocationExplainAction } /** - * Construct a {@code NodeExplanation} object for the given shard given all the metadata. 
This also attempts to construct the human + * Construct a {@code WeightedDecision} object for the given shard given all the metadata. This also attempts to construct the human * readable FinalDecision and final explanation as part of the explanation. */ public static NodeExplanation calculateNodeExplanation(ShardRouting shard, diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java index 9770b05bae0..786b3185116 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.health; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterHealthAction extends Action { public static final ClusterHealthAction INSTANCE = new ClusterHealthAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 47e0ecd7f74..ef206d0183b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -34,9 +34,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.util.concurrent.TimeUnit; -/** - * - */ public class ClusterHealthRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index 1a82cf8cb1c..6716591f7a6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -26,9 +26,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; -/** - * - */ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterHealthRequestBuilder(ElasticsearchClient client, ClusterHealthAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index d483ae86bf7..e4a575dcf79 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -36,9 +36,6 @@ import java.io.IOException; import java.util.Locale; import java.util.Map; -/** - * - */ public class ClusterHealthResponse extends ActionResponse implements StatusToXContent { private String clusterName; private int numberOfPendingTasks = 0; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 93140794240..9773410aacc 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -34,6 +34,8 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.ClusterServiceState; +import org.elasticsearch.cluster.service.ClusterStateStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -44,9 +46,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -/** - * - */ public class TransportClusterHealthAction extends TransportMasterNodeReadAction { private final GatewayAllocator gatewayAllocator; @@ -143,7 +142,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< assert waitFor >= 0; final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext()); - final ClusterState state = observer.observedState(); + final ClusterServiceState observedState = observer.observedState(); + final ClusterState state = observedState.getClusterState(); if (request.timeout().millis() == 0) { listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0)); return; @@ -151,8 +151,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< final int concreteWaitFor = waitFor; final ClusterStateObserver.ChangePredicate validationPredicate = new ClusterStateObserver.ValidationPredicate() { @Override - protected boolean validate(ClusterState newState) { - return newState.status() == ClusterState.ClusterStateStatus.APPLIED && validateRequest(request, newState, concreteWaitFor); + protected boolean validate(ClusterServiceState newState) { + return newState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, newState.getClusterState(), concreteWaitFor); } }; @@ -174,7 +174,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< listener.onResponse(response); } }; - if (state.status() == ClusterState.ClusterStateStatus.APPLIED && validateRequest(request, state, concreteWaitFor)) { + if (observedState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, state, concreteWaitFor)) { stateListener.onNewClusterState(state); } else { observer.waitForNextChange(stateListener, validationPredicate, request.timeout()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java index 635be28a646..7268f962cee 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - */ public class NodeHotThreads extends BaseNodeResponse { private String hotThreads; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java index 7010b4cb143..57ca90288ce 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class NodesHotThreadsAction extends Action { public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index e3df7f57312..60b03c710af 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -27,8 +27,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.util.concurrent.TimeUnit; -/** - */ public class NodesHotThreadsRequest extends BaseNodesRequest { int threads = 3; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index 60001f50b62..1709151e824 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -23,8 +23,6 @@ import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; -/** - */ public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder { public NodesHotThreadsRequestBuilder(ElasticsearchClient client, NodesHotThreadsAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index 3136f2b6826..44c38cb4f9a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.List; -/** - */ public class NodesHotThreadsResponse extends BaseNodesResponse { NodesHotThreadsResponse() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 73403f40318..da45a3e4027 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -38,9 +38,6 @@ import org.elasticsearch.transport.TransportService; import 
java.io.IOException; import java.util.List; -/** - * - */ public class TransportNodesHotThreadsAction extends TransportNodesAction { public static final NodesInfoAction INSTANCE = new NodesInfoAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index 16befb79aab..12221016160 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -22,9 +22,6 @@ package org.elasticsearch.action.admin.cluster.node.info; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder { public NodesInfoRequestBuilder(ElasticsearchClient client, NodesInfoAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java index 2d273bef2c0..f233494e1c6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -34,9 +34,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class NodesInfoResponse extends BaseNodesResponse implements ToXContent { public NodesInfoResponse() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 028198cf831..c26554b25e0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -36,9 +36,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.List; -/** - * - */ public class TransportNodesInfoAction extends TransportNodesAction attrEntry : getNode().getAttributes().entrySet()) { - builder.field(attrEntry.getKey(), attrEntry.getValue()); - } - builder.endObject(); - } + builder.startArray("roles"); + for (DiscoveryNode.Role role : getNode().getRoles()) { + builder.value(role.getRoleName()); } + builder.endArray(); + + if (!getNode().getAttributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry attrEntry : getNode().getAttributes().entrySet()) { + builder.field(attrEntry.getKey(), attrEntry.getValue()); + } + builder.endObject(); + } + if (getIndices() != null) { getIndices().toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java index 3c322e3335e..857922bf9eb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.node.stats; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - 
*/ public class NodesStatsAction extends Action { public static final NodesStatsAction INSTANCE = new NodesStatsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index 027e6122681..d4005c84d09 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -23,9 +23,6 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder { public NodesStatsRequestBuilder(ElasticsearchClient client, NodesStatsAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 1a9023ab93c..c4553304f41 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -31,9 +31,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.List; -/** - * - */ public class NodesStatsResponse extends BaseNodesResponse implements ToXContent { NodesStatsResponse() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 5863e54d08f..b4cef38d28d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -36,9 +36,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.List; -/** - * - */ public class TransportNodesStatsAction extends TransportNodesAction listener) { final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes)); Set childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished); if (childNodes != null) { if (childNodes.isEmpty()) { logger.trace("cancelling task {} with no children", cancellableTask.getId()); - return cancellableTask.taskInfo(clusterService.localNode(), false); + listener.onResponse(cancellableTask.taskInfo(clusterService.localNode().getId(), false)); } else { logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes); setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock); - return cancellableTask.taskInfo(clusterService.localNode(), false); + listener.onResponse(cancellableTask.taskInfo(clusterService.localNode().getId(), false)); } } else { logger.trace("task {} is already cancelled", cancellableTask.getId()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 670812a0b48..5711a2a214d 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -150,7 +150,7 @@ public class TransportGetTaskAction extends HandledTransportAction { public static long waitForCompletionTimeout(TimeValue timeout) { if (timeout == null) { @@ -72,8 +70,8 @@ public class TransportListTasksAction extends TransportTasksAction listener) { + listener.onResponse(task.taskInfo(clusterService.localNode().getId(), request.getDetailed())); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java index 7aa6dc25cdc..70a39d2d329 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.reroute; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterRerouteAction extends Action { public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 7aade821f83..6fe497dd16f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -39,8 +39,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -/** - */ public class TransportClusterRerouteAction extends TransportMasterNodeAction { private final AllocationService allocationService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java index 15b3e70dd04..bb0c0b08a92 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.settings; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterUpdateSettingsAction extends Action { public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 7e77d222430..ab6cdb94e1f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -42,19 +42,19 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; -/** - * - */ -public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAction { +public class TransportClusterUpdateSettingsAction extends + TransportMasterNodeAction { private final AllocationService allocationService; private final ClusterSettings clusterSettings; @Inject - public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { - super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); + public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, AllocationService allocationService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { + super(settings, ClusterUpdateSettingsAction.NAME, false, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; this.clusterSettings = clusterSettings; } @@ -67,8 +67,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) { // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it - if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) || - request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) { + if ((request.transientSettings().getAsMap().isEmpty() && + request.persistentSettings().getAsMap().size() == 1 && + MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) || + (request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && + MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings()))) { return null; } return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); @@ -81,7 +84,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct } @Override - protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { + protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, + final ActionListener listener) { final SettingsUpdater updater = new SettingsUpdater(clusterSettings); clusterService.submitStateUpdateTask("cluster_update_settings", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { @@ -117,7 +121,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct // so we should *not* execute the reroute. 
if (!clusterService.state().nodes().isLocalNodeElectedMaster()) { logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); - listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate())); + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), + updater.getPersistentUpdate())); return; } @@ -135,15 +140,18 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct } @Override - //we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the update settings was acknowledged + // we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the + // update settings was acknowledged protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); + return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), + updater.getPersistentUpdate()); } @Override public void onNoLongerMaster(String source) { logger.debug("failed to perform reroute after cluster settings were updated - current node is no longer a master"); - listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate())); + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), + updater.getPersistentUpdate())); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java index 28f7557a2e2..cb3240a7929 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.shards; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterSearchShardsAction extends Action { public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index ccb4d32465e..12d9ffe7c38 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -30,8 +30,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - */ public class ClusterSearchShardsGroup implements Streamable, ToXContent { private ShardId shardId; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 21ecf8a4c4f..8dc747474c4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++
b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -30,8 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - */ public class ClusterSearchShardsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; @Nullable diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 0b9c9d044e7..030d5db6472 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -23,8 +23,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterSearchShardsRequestBuilder(ElasticsearchClient client, ClusterSearchShardsAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 5f45025351e..140a0141a2d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class ClusterSearchShardsResponse extends ActionResponse implements ToXContent { private ClusterSearchShardsGroup[] groups; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 2f9a6e7dede..087597d47fc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -42,8 +42,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -/** - */ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction { @Inject diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index ad8cb1ae88e..573bb0ea263 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -39,7 +39,7 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.HashMap; -import java.util.LinkedHashSet; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -80,25 +80,26 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction snapshotInfoBuilder = new ArrayList<>(); - if 
(isAllSnapshots(request.snapshots())) { - snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository)); - snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, - snapshotsService.snapshotIds(repository), - request.ignoreUnavailable())); - } else if (isCurrentSnapshots(request.snapshots())) { - snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository)); - } else { - final Map allSnapshotIds = new HashMap<>(); - for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) { - SnapshotId snapshotId = snapshotInfo.snapshotId(); - allSnapshotIds.put(snapshotId.getName(), snapshotId); - } + final Map allSnapshotIds = new HashMap<>(); + final List currentSnapshotIds = new ArrayList<>(); + for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) { + SnapshotId snapshotId = snapshotInfo.snapshotId(); + allSnapshotIds.put(snapshotId.getName(), snapshotId); + currentSnapshotIds.add(snapshotId); + } + if (isCurrentSnapshotsOnly(request.snapshots()) == false) { for (SnapshotId snapshotId : snapshotsService.snapshotIds(repository)) { allSnapshotIds.put(snapshotId.getName(), snapshotId); } - final Set toResolve = new LinkedHashSet<>(); // maintain order + } + final Set toResolve = new HashSet<>(); + if (isAllSnapshots(request.snapshots())) { + toResolve.addAll(allSnapshotIds.values()); + } else { for (String snapshotOrPattern : request.snapshots()) { - if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { + if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { + toResolve.addAll(currentSnapshotIds); + } else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { if (allSnapshotIds.containsKey(snapshotOrPattern)) { toResolve.add(allSnapshotIds.get(snapshotOrPattern)); } else if (request.ignoreUnavailable() == false) { @@ -113,12 +114,12 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction(toResolve), request.ignoreUnavailable())); } + + snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable())); listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder)); } catch (Exception e) { listener.onFailure(e); @@ -129,7 +130,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction() { + restoreService.restoreSnapshot(restoreRequest, new ActionListener() { @Override - public void onResponse(RestoreInfo restoreInfo) { - if (restoreInfo == null && request.waitForCompletion()) { - restoreService.addListener(new ActionListener() { + public void onResponse(RestoreCompletionResponse restoreCompletionResponse) { + if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { + final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); + + ClusterStateListener clusterStateListener = new ClusterStateListener() { @Override - public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) { - final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); - if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { - listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo())); - restoreService.removeListener(this); + public void clusterChanged(ClusterChangedEvent changedEvent) { + final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), snapshot); + final RestoreInProgress.Entry newEntry = 
restoreInProgress(changedEvent.state(), snapshot); + if (prevEntry == null) { + // When there is a master failure after a restore has been started, this listener might not be registered + // on the current master and as such it might miss some intermediary cluster states due to batching. + // Clean up listener in that case and acknowledge completion of restore operation to client. + clusterService.remove(this); + listener.onResponse(new RestoreSnapshotResponse(null)); + } else if (newEntry == null) { + clusterService.remove(this); + ImmutableOpenMap shards = prevEntry.shards(); + assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state(); + assert RestoreService.completed(shards) : "expected all restore entries to be completed"; + RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), + prevEntry.indices(), + shards.size(), + shards.size() - RestoreService.failedShards(shards)); + RestoreSnapshotResponse response = new RestoreSnapshotResponse(ri); + logger.debug("restore of [{}] completed", snapshot); + listener.onResponse(response); + } else { + // restore not completed yet, wait for next cluster state update } } + }; - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + clusterService.addLast(clusterStateListener); } else { - listener.onResponse(new RestoreSnapshotResponse(restoreInfo)); + listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo())); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index efbc82c9b6a..d96daa86f76 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -20,8 +20,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -/** - */ public enum SnapshotIndexShardStage { /** diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index a7cebca0aa6..462c5cadabd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -29,8 +29,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import java.io.IOException; -/** - */ public class SnapshotIndexShardStatus extends BroadcastShardResponse implements ToXContent { private SnapshotIndexShardStage stage = SnapshotIndexShardStage.INIT; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java index 2e0e3f993a1..a1eaaf9560a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -28,8 +28,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import java.io.IOException; -/** - */ public class SnapshotStats implements Streamable, ToXContent { private long startTime; 
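The TransportRestoreSnapshotAction hunk above replaces the dedicated restore-completion listener with a plain ClusterStateListener: the action registers a listener, checks on every cluster-state update whether the RestoreInProgress entry for the snapshot is still present, and once the entry disappears (or, after a master failover, was never observed) it deregisters itself and answers the client. The sketch below distills that wait-for-state pattern into self-contained Java; all names here (StateWaiter, publish, waitForRemoval) are illustrative stand-ins under assumed semantics, not the Elasticsearch API.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

// Hypothetical stand-in for the cluster service: every published state is
// offered to each listener; a listener that returns true is finished and is
// removed, mirroring clusterService.remove(this) in the hunk above.
public class StateWaiter {
    private final CopyOnWriteArrayList<Predicate<Map<String, String>>> listeners =
            new CopyOnWriteArrayList<>();

    // Deliver a new "cluster state" (here reduced to entry-name -> status).
    public void publish(Map<String, String> newState) {
        listeners.removeIf(listener -> listener.test(newState));
    }

    // Complete the returned future once entryName is absent from the state;
    // this is the same signal the hunk uses, where a missing
    // RestoreInProgress entry means the restore has finished.
    public CompletableFuture<Void> waitForRemoval(String entryName) {
        CompletableFuture<Void> done = new CompletableFuture<>();
        listeners.add(state -> {
            if (state.containsKey(entryName)) {
                return false; // restore still running; wait for the next update
            }
            done.complete(null);
            return true;      // deregister in the same callback that completes
        });
        return done;
    }

    public static void main(String[] args) throws Exception {
        StateWaiter waiter = new StateWaiter();
        CompletableFuture<Void> done = waiter.waitForRemoval("restore-1");
        waiter.publish(Map.of("restore-1", "STARTED")); // entry present: keep waiting
        waiter.publish(Map.of());                       // entry gone: completes
        done.get(1, TimeUnit.SECONDS);
        System.out.println("restore completed");
    }
}

The design point mirrored here is that the listener removes itself in the very callback that completes the wait, so a finished restore cannot leak a stale listener even when intermediate cluster states are batched; the production code additionally treats a never-seen entry (prevEntry == null after master failover) as completion, which this sketch elides.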
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index cf00784dc3f..c73ae48d070 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -56,8 +56,6 @@ import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; -/** - */ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction { private final SnapshotsService snapshotsService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java index 70786f99e0d..959658f6e7c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.state; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterStateAction extends Action { public static final ClusterStateAction INSTANCE = new ClusterStateAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index f5dc975f0a3..e6b468b804b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - * - */ public class ClusterStateRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private boolean routingTable = true; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index e29798bb246..347a51afa13 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -23,9 +23,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterStateRequestBuilder(ElasticsearchClient client, ClusterStateAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index 2a2f4707f69..f4d8b891b86 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -27,9 +27,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - * - */ 
public class ClusterStateResponse extends ActionResponse { private ClusterName clusterName; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index a1b10c9c4f4..6c965cb3bbd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -36,9 +36,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -/** - * - */ public class TransportClusterStateAction extends TransportMasterNodeReadAction { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java index 1cddf37ad31..80ae0b6911b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClusterStatsAction extends Action { public static final ClusterStatsAction INSTANCE = new ClusterStatsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 6102f13754a..0d545ddfa70 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -75,10 +74,7 @@ public class ClusterStatsNodes implements ToXContent { // now do the stats that should be deduped by hardware (implemented by ip deduping) TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress(); - InetAddress inetAddress = null; - if (publishAddress.uniqueAddressTypeId() == 1) { - inetAddress = ((InetSocketTransportAddress) publishAddress).address().getAddress(); - } + final InetAddress inetAddress = publishAddress.address().getAddress(); if (!seenAddresses.add(inetAddress)) { continue; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java index db64dc94919..f05d998ee04 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java @@ -22,9 +22,6 @@ package org.elasticsearch.action.admin.cluster.stats; import 
org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder { public ClusterStatsRequestBuilder(ElasticsearchClient client, ClusterStatsAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index efc72d104f8..4ce9c7e6ff5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -33,26 +33,22 @@ import java.io.IOException; import java.util.List; import java.util.Locale; -/** - * - */ public class ClusterStatsResponse extends BaseNodesResponse implements ToXContent { ClusterStatsNodes nodesStats; ClusterStatsIndices indicesStats; - String clusterUUID; ClusterHealthStatus status; long timestamp; - ClusterStatsResponse() { } - public ClusterStatsResponse(long timestamp, ClusterName clusterName, String clusterUUID, - List nodes, List failures) { + public ClusterStatsResponse(long timestamp, + ClusterName clusterName, + List nodes, + List failures) { super(clusterName, nodes, failures); this.timestamp = timestamp; - this.clusterUUID = clusterUUID; nodesStats = new ClusterStatsNodes(nodes); indicesStats = new ClusterStatsIndices(nodes); for (ClusterStatsNodeResponse response : nodes) { @@ -84,7 +80,6 @@ public class ClusterStatsResponse extends BaseNodesResponse { @@ -75,8 +72,11 @@ public class TransportClusterStatsAction extends TransportNodesAction responses, List failures) { - return new ClusterStatsResponse(System.currentTimeMillis(), clusterService.getClusterName(), - clusterService.state().metaData().clusterUUID(), responses, failures); + return new ClusterStatsResponse( + System.currentTimeMillis(), + clusterService.getClusterName(), + responses, + failures); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java index e7686387061..61cb8561ea5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class DeleteStoredScriptAction extends Action { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java index 178598fad8b..7b9eb5209cc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.storedscripts; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class GetStoredScriptAction extends Action { diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java index 264d37a42e0..d01a12f58a2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java @@ -23,8 +23,6 @@ import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class PutStoredScriptAction extends Action { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java index 1e3eb3f6e81..0b420a4e505 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.tasks; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class PendingClusterTasksAction extends Action { public static final PendingClusterTasksAction INSTANCE = new PendingClusterTasksAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java index 09f25feb90f..738276a9907 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.tasks; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; -/** - */ public class PendingClusterTasksRequest extends MasterNodeReadRequest { @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java index 1392d973eb3..029ba7414ab 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.cluster.tasks; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder { public PendingClusterTasksRequestBuilder(ElasticsearchClient client, PendingClusterTasksAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index 35d5b3efb7b..bb1afe5e19e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -25,15 +25,12 @@ import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; -/** - */ public class PendingClusterTasksResponse extends ActionResponse implements Iterable, ToXContent { private List pendingTasks; @@ -61,7 +58,8 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera return pendingTasks.iterator(); } - public String prettyPrint() { + @Override + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("tasks: (").append(pendingTasks.size()).append("):\n"); for (PendingClusterTask pendingClusterTask : this) { @@ -70,19 +68,6 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera return sb.toString(); } - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray(Fields.TASKS); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 370b668f659..c15758de3cb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -35,8 +35,6 @@ import org.elasticsearch.transport.TransportService; import java.util.List; -/** - */ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction { private final ClusterService clusterService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java index 3cf0ca574fb..d4045b8b22e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.alias; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class IndicesAliasesAction extends Action { public static final IndicesAliasesAction INSTANCE = new IndicesAliasesAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 63493210f7c..524a21ec632 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -20,12 +20,9 @@ package org.elasticsearch.action.admin.indices.alias; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.ElasticsearchGenerationException; import 
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
index 370b668f659..c15758de3cb 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
@@ -35,8 +35,6 @@ import org.elasticsearch.transport.TransportService;
 import java.util.List;
-/**
- */
 public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction {
     private final ClusterService clusterService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
index 3cf0ca574fb..d4045b8b22e 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.alias;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class IndicesAliasesAction extends Action {
     public static final IndicesAliasesAction INSTANCE = new IndicesAliasesAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
index 63493210f7c..524a21ec632 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -20,12 +20,9 @@ package org.elasticsearch.action.admin.indices.alias;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-
 import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.AliasesRequest;
-import org.elasticsearch.action.CompositeIndicesRequest;
-import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.cluster.metadata.AliasAction;
@@ -63,7 +60,7 @@ import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
 /**
  * A request to add/remove aliases for one or more indices.
  */
-public class IndicesAliasesRequest extends AcknowledgedRequest implements CompositeIndicesRequest {
+public class IndicesAliasesRequest extends AcknowledgedRequest {
     private List allAliasActions = new ArrayList<>();
     //indices options that require every specified index to exist, expand wildcards only to open indices and
@@ -502,9 +499,4 @@ public class IndicesAliasesRequest extends AcknowledgedRequest
-    subRequests() {
-        return allAliasActions;
-    }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
index 23dc1e13a56..70467f4343c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java
@@ -23,8 +23,6 @@ import org.elasticsearch.action.Action;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class AliasesExistAction extends Action {
     public static final AliasesExistAction INSTANCE = new AliasesExistAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java
index 40f421006e3..d889d15ce9c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.alias.exists;
 import org.elasticsearch.action.admin.indices.alias.get.BaseAliasesRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class AliasesExistRequestBuilder extends BaseAliasesRequestBuilder {
     public AliasesExistRequestBuilder(ElasticsearchClient client, AliasesExistAction action, String... aliases) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java
index 65474c8e8ab..2d430bf9820 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistResponse.java
@@ -25,8 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
-/**
- */
 public class AliasesExistResponse extends ActionResponse {
     private boolean exists;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java
index 8ca09dbb67e..9a104598e8a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java
@@ -32,8 +32,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-/**
- */
 public class TransportAliasesExistAction extends TransportMasterNodeReadAction {
     @Inject
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
index bcfdb0e10d8..f4e3868fda9 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
@@ -26,8 +26,6 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBui
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.util.ArrayUtils;
-/**
- */
 public abstract class BaseAliasesRequestBuilder> extends MasterNodeReadOperationRequestBuilder {
     public BaseAliasesRequestBuilder(ElasticsearchClient client, Action action, String... aliases) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
index 188f72e1e34..71badddacab 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.alias.get;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetAliasesAction extends Action {
     public static final GetAliasesAction INSTANCE = new GetAliasesAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java
index fd3a93b69d9..bcb887b8383 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java
@@ -28,8 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
-/**
- */
 public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest {
     private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
index 4a6f9885008..fa7101eba5a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
@@ -21,8 +21,6 @@ package org.elasticsearch.action.admin.indices.alias.get;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder {
     public GetAliasesRequestBuilder(ElasticsearchClient client, GetAliasesAction action, String... aliases) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java
index e23faa1cbbf..85282788898 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java
@@ -31,8 +31,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-/**
- */
 public class GetAliasesResponse extends ActionResponse {
     private ImmutableOpenMap> aliases = ImmutableOpenMap.of();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
index 061f916c2e0..e871fc04f97 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
@@ -35,8 +35,6 @@ import org.elasticsearch.transport.TransportService;
 import java.util.List;
-/**
- */
 public class TransportGetAliasesAction extends TransportMasterNodeReadAction {
     @Inject
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
index b29ba7507f8..5b92af47b37 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.analyze;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class AnalyzeAction extends Action {
     public static final AnalyzeAction INSTANCE = new AnalyzeAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
index 78d06185423..344681b997e 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java
@@ -23,9 +23,6 @@ import org.elasticsearch.client.ElasticsearchClient;
 import java.util.Map;
-/**
- *
- */
 public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder {
     public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
index 48db340a1c7..302597e0e09 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
@@ -32,9 +32,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-/**
- *
- */
 public class AnalyzeResponse extends ActionResponse implements Iterable, ToXContent {
     public static class AnalyzeToken implements Streamable, ToXContent {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
index 0880c66802e..f8e35168bcb 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.cache.clear;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class ClearIndicesCacheAction extends Action {
     public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
index 3c068b7ad21..c9e04d53064 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
@@ -25,9 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
-/**
- *
- */
 public class ClearIndicesCacheRequest extends BroadcastRequest {
     private boolean queryCache = false;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
index 8061ec89d92..8cfe3d7b909 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java
@@ -22,9 +22,6 @@ package org.elasticsearch.action.admin.indices.cache.clear;
 import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- *
- */
 public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder {
     public ClearIndicesCacheRequestBuilder(ElasticsearchClient client, ClearIndicesCacheAction action) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
index fcb38b01da5..44151f2cfe9 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.close;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class CloseIndexAction extends Action {
     public static final CloseIndexAction INSTANCE = new CloseIndexAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
index aa3d3ac92ad..3bac77241bf 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.create;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class CreateIndexAction extends Action {
     public static final CreateIndexAction INSTANCE = new CreateIndexAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
index 7b3b2a0a2f0..a2290a5e255 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java
@@ -41,6 +41,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private final TransportMessage originalMessage;
     private final String cause;
     private final String index;
+    private final String providedName;
     private final boolean updateAllTypes;
     private Index shrinkFrom;
@@ -59,11 +60,13 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
-    public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
+    public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName,
+                                                boolean updateAllTypes) {
         this.originalMessage = originalMessage;
         this.cause = cause;
         this.index = index;
         this.updateAllTypes = updateAllTypes;
+        this.providedName = providedName;
     }
     public CreateIndexClusterStateUpdateRequest settings(Settings settings) {
@@ -151,6 +154,14 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
         return updateAllTypes;
     }
+    /**
+     * The name that was provided by the user. This might contain a date math expression.
+     * @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME
+     */
+    public String getProvidedName() {
+        return providedName;
+    }
+
     public ActiveShardCount waitForActiveShards() {
         return waitForActiveShards;
     }
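The new `providedName` field deliberately carries the name exactly as the user supplied it, which may be a date math expression rather than a concrete index name. A hedged sketch of the distinction; the literal values and the `originalMessage` variable are illustrative only:

    // What the user typed vs. what it resolved to on the day of creation
    String providedName  = "<logs-{now/d}>";
    String concreteIndex = "logs-2016.09.19";

    CreateIndexClusterStateUpdateRequest updateRequest =
            new CreateIndexClusterStateUpdateRequest(originalMessage, "api",
                    concreteIndex, providedName, false);
    assert "<logs-{now/d}>".equals(updateRequest.getProvidedName());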
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
index d3ce1975e89..354dcf23873 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
@@ -72,7 +72,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction
 { public static final DeleteIndexAction INSTANCE = new DeleteIndexAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
index 9e5dc88b983..a75bcd77748 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java
@@ -24,9 +24,6 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.unit.TimeValue;
-/**
- *
- */
 public class DeleteIndexRequestBuilder extends MasterNodeOperationRequestBuilder {
     public DeleteIndexRequestBuilder(ElasticsearchClient client, DeleteIndexAction action, String... indices) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
index b9bfa00d7a2..aaabcec516b 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.exists.indices;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class IndicesExistsAction extends Action {
     public static final IndicesExistsAction INSTANCE = new IndicesExistsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
index c448c936766..2574719aa61 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java
@@ -61,8 +61,15 @@ public class IndicesExistsRequest extends MasterNodeReadRequest
 { public IndicesExistsRequestBuilder(ElasticsearchClient client, IndicesExistsAction action, String... indices) {
@@ -38,12 +34,18 @@ public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestB
     }
     /**
-     * Specifies what type of requested indices to ignore and wildcard indices expressions.
-     *
-     * For example indices that don't exist.
+     * Controls whether wildcard expressions will be expanded to existing open indices
      */
-    public IndicesExistsRequestBuilder setIndicesOptions(IndicesOptions options) {
-        request.indicesOptions(options);
+    public IndicesExistsRequestBuilder setExpandWildcardsOpen(boolean expandWildcardsOpen) {
+        request.expandWilcardsOpen(expandWildcardsOpen);
+        return this;
+    }
+
+    /**
+     * Controls whether wildcard expressions will be expanded to existing closed indices
+     */
+    public IndicesExistsRequestBuilder setExpandWildcardsClosed(boolean expandWildcardsClosed) {
+        request.expandWilcardsClosed(expandWildcardsClosed);
         return this;
     }
 }
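The builder now exposes the two wildcard-expansion flags directly instead of taking a whole `IndicesOptions` object. A usage sketch, assuming an already-built `client` (not part of this commit); note the request methods keep the `expandWilcards...` spelling from the diff:

    boolean exists = client.admin().indices()
            .prepareExists("logs-*")
            .setExpandWildcardsOpen(true)    // match open indices
            .setExpandWildcardsClosed(false) // ignore closed ones
            .get()
            .isExists();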
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
index b7fea539d13..147d7b8f1ab 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java
@@ -21,8 +21,6 @@ package org.elasticsearch.action.admin.indices.exists.types;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class TypesExistsAction extends Action {
     public static final TypesExistsAction INSTANCE = new TypesExistsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java
index 9aba8ec44ed..0e099da8488 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequest.java
@@ -29,8 +29,6 @@ import java.io.IOException;
 import static org.elasticsearch.action.ValidateActions.addValidationError;
-/**
- */
 public class TypesExistsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable {
     private String[] indices;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
index 4cc116fd1dd..1812cfae1a7 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.flush;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class FlushAction extends Action {
     public static final FlushAction INSTANCE = new FlushAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java
index 39a80fd3ca5..bddf5ce0449 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java
@@ -22,9 +22,6 @@ package org.elasticsearch.action.admin.indices.flush;
 import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- *
- */
 public class FlushRequestBuilder extends BroadcastOperationRequestBuilder {
     public FlushRequestBuilder(ElasticsearchClient client, FlushAction action) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java
index 570307a717d..1ec7186393f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java
@@ -33,9 +33,6 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-/**
- *
- */
 public class TransportShardFlushAction extends TransportReplicationAction {
     public static final String NAME = FlushAction.NAME + "[s]";
@@ -54,18 +51,16 @@ public class TransportShardFlushAction extends TransportReplicationAction
 { public static final ForceMergeAction INSTANCE = new ForceMergeAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java
index 74111c82b2f..f692c1e0263 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.get;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetIndexAction extends Action {
     public static final GetIndexAction INSTANCE = new GetIndexAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java
index 7ffc8533aac..677bbeee485 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java
@@ -23,9 +23,6 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
 import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- *
- */
 public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder {
     public GetIndexRequestBuilder(ElasticsearchClient client, GetIndexAction action, String... indices) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
index 520ed7b2cd4..410292ca688 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetFieldMappingsAction extends Action {
     public static final GetFieldMappingsAction INSTANCE = new GetFieldMappingsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
index d7dbebc600c..5a9425cadec 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetMappingsAction extends Action {
     public static final GetMappingsAction INSTANCE = new GetMappingsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
index 15222cf83d7..dc4836286cd 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
-/**
- */
 public class GetMappingsRequest extends ClusterInfoRequest {
     @Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
index 0471a29c82f..7ecb6713953 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder {
     public GetMappingsRequestBuilder(ElasticsearchClient client, GetMappingsAction action, String... indices) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java
index 30e9e24c493..e092f1f148d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java
@@ -28,8 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
-/**
- */
 public class GetMappingsResponse extends ActionResponse {
     private ImmutableOpenMap> mappings = ImmutableOpenMap.of();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java
index a69dd2ed437..8729b60e3bf 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java
@@ -37,8 +37,6 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
 import static java.util.Collections.unmodifiableMap;
-/**
- */
 public class TransportGetFieldMappingsAction extends HandledTransportAction {
     private final ClusterService clusterService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
index 293f5a0e677..363e935ca56 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java
@@ -34,8 +34,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-/**
- */
 public class TransportGetMappingsAction extends TransportClusterInfoAction {
     @Inject
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java
index 5ed79ceca98..5bcff09e01a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.mapping.put;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class PutMappingAction extends Action {
     public static final PutMappingAction INSTANCE = new PutMappingAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java
index c12e8d23c9c..c8553322d30 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.open;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class OpenIndexAction extends Action {
     public static final OpenIndexAction INSTANCE = new OpenIndexAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java
index cf9f5681953..d4b8d3532e9 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java
@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -55,19 +54,16 @@ public class TransportShardRefreshAction
     }
     @Override
-    protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardRequest) {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
-        indexShard.refresh("api");
-        logger.trace("{} refresh request executed on primary", indexShard.shardId());
+    protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardRequest, IndexShard primary) {
+        primary.refresh("api");
+        logger.trace("{} refresh request executed on primary", primary.shardId());
         return new PrimaryResult(shardRequest, new ReplicationResponse());
     }
     @Override
-    protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request) {
-        final ShardId shardId = request.shardId();
-        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
-        indexShard.refresh("api");
-        logger.trace("{} refresh request executed on replica", indexShard.shardId());
+    protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request, IndexShard replica) {
+        replica.refresh("api");
+        logger.trace("{} refresh request executed on replica", replica.shardId());
         return new ReplicaResult();
     }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java
index dd9d50dac73..f66c77968f1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.rollover;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class RolloverAction extends Action {
     public static final RolloverAction INSTANCE = new RolloverAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
index eaf9025bf04..9bd08e3618d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
@@ -44,6 +44,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -61,7 +62,7 @@ import static java.util.Collections.unmodifiableList;
  */
 public class TransportRolloverAction extends TransportMasterNodeAction {
-    private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-(\\d)+$");
+    private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-\\d+$");
     private final MetaDataCreateIndexService createIndexService;
     private final MetaDataIndexAliasesService indexAliasesService;
     private final ActiveShardsObserver activeShardsObserver;
@@ -106,23 +107,29 @@ public class TransportRolloverAction extends TransportMasterNodeAction
 () {
             @Override
             public void onResponse(IndicesStatsResponse statsResponse) {
                 final Set conditionResults = evaluateConditions(rolloverRequest.getConditions(),
                     statsResponse.getTotal().getDocs(), metaData.index(sourceIndexName));
-                final String rolloverIndexName = (rolloverRequest.getNewIndexName() != null)
-                    ? rolloverRequest.getNewIndexName()
-                    : generateRolloverIndexName(sourceIndexName);
+
                 if (rolloverRequest.isDryRun()) {
                     listener.onResponse(
                         new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false));
                     return;
                 }
                 if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) {
-                    CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(rolloverIndexName, rolloverRequest);
+                    CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(unresolvedName, rolloverIndexName,
+                        rolloverRequest);
                     createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
                         // switch the alias to point to the newly created index
                         indexAliasesService.indicesAliases(
@@ -145,7 +152,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction
 " : "");
+        return newName;
     } else {
-        throw new IllegalArgumentException("index name [" + sourceIndexName + "] does not match pattern '^.*-(\\d)+$'");
+        throw new IllegalArgumentException("index name [" + sourceIndexName + "] does not match pattern '^.*-\\d+$'");
     }
 }
@@ -203,14 +215,14 @@ public class TransportRolloverAction extends TransportMasterNodeAction
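Two things happen in the rollover hunks above: the target name is now computed once up front (so dry-run and real rollover agree), and `INDEX_NAME_PATTERN` drops the redundant capture group; `^.*-(\d)+$` and `^.*-\d+$` accept the same names, the new form just states it more plainly. The generated name keeps everything before the last `-` and bumps a zero-padded counter. A standalone re-implementation of that naming rule for illustration only (it ignores the date-math `<...>` wrapping that the real method also handles):

    static String nextRolloverName(String sourceIndexName) {
        if (sourceIndexName.matches("^.*-\\d+$") == false) {
            throw new IllegalArgumentException(
                    "index name [" + sourceIndexName + "] does not match pattern '^.*-\\d+$'");
        }
        int sep = sourceIndexName.lastIndexOf('-');
        long counter = Long.parseLong(sourceIndexName.substring(sep + 1));
        return String.format(java.util.Locale.ROOT, "%s-%06d",
                sourceIndexName.substring(0, sep), counter + 1);
    }
    // nextRolloverName("logs-000001") -> "logs-000002"
    // nextRolloverName("logs-9")      -> "logs-000010"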
 { public static final IndicesSegmentsAction INSTANCE = new IndicesSegmentsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
index d839e2a25c3..ce4a5705168 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
@@ -22,9 +22,6 @@ package org.elasticsearch.action.admin.indices.segments;
 import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- *
- */
 public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder {
     public IndicesSegmentsRequestBuilder(ElasticsearchClient client, IndicesSegmentsAction action) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
index 8df46719c7b..350e8dffa19 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
@@ -41,9 +41,6 @@ import org.elasticsearch.transport.TransportService;
 import java.io.IOException;
 import java.util.List;
-/**
- *
- */
 public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction {
     private final IndicesService indicesService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
index 448a5e25fbd..1abf8e746fb 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.settings.get;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class GetSettingsAction extends Action {
     public static final GetSettingsAction INSTANCE = new GetSettingsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java
index eb981491e52..d15da04acab 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java
@@ -30,8 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
-/**
- */
 public class GetSettingsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable {
     private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
index 5a2ca7aef15..2fff2eca0c2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
@@ -24,8 +24,6 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBui
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.util.ArrayUtils;
-/**
- */
 public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder {
     public GetSettingsRequestBuilder(ElasticsearchClient client, GetSettingsAction action, String... indices) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
index c45c7d9f545..0a3229dcaf1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
@@ -28,8 +28,6 @@ import org.elasticsearch.common.settings.Settings;
 import java.io.IOException;
-/**
- */
 public class GetSettingsResponse extends ActionResponse {
     private ImmutableOpenMap indexToSettings = ImmutableOpenMap.of();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
index f09d3fb559c..6e6d3eaee98 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
@@ -40,8 +40,6 @@ import org.elasticsearch.transport.TransportService;
 import java.util.Map;
-/**
- */
 public class TransportGetSettingsAction extends TransportMasterNodeReadAction {
     private final SettingsFilter settingsFilter;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
index f9ebff06636..67099b4d100 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
@@ -38,9 +38,6 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-/**
- *
- */
 public class TransportUpdateSettingsAction extends TransportMasterNodeAction {
     private final MetaDataUpdateSettingsService updateSettingsService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
index 17001b7376d..a23874831c2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.settings.put;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class UpdateSettingsAction extends Action {
     public static final UpdateSettingsAction INSTANCE = new UpdateSettingsAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java
index 4c09241ad75..8b5b4670e3c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java
@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.shrink;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.client.ElasticsearchClient;
-/**
- */
 public class ShrinkAction extends Action {
     public static final ShrinkAction INSTANCE = new ShrinkAction();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java
index 4667f1e9825..6d27b03db63 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java
@@ -104,10 +104,10 @@ public class TransportShrinkAction extends TransportMasterNodeAction
 perShardDocStats, IndexNameExpressionResolver indexNameExpressionResolver) {
-        final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkReqeust.getSourceIndex());
-        final CreateIndexRequest targetIndex = shrinkReqeust.getShrinkIndexRequest();
+        final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex());
+        final CreateIndexRequest targetIndex = shrinkRequest.getShrinkIndexRequest();
         final String targetIndexName = indexNameExpressionResolver.resolveDateMathExpression(targetIndex.index());
         final IndexMetaData metaData = state.metaData().index(sourceIndex);
         final Settings targetIndexSettings = Settings.builder().put(targetIndex.settings())
@@ -137,7 +137,7 @@ public class TransportShrinkAction extends TransportMasterNodeAction
 { private final String index;
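Besides fixing the `shrinkReqeust` typo, the hunk above routes both the shrink source and the target index name through `resolveDateMathExpression`, so either side may be written as an expression. A small hedged sketch; the expression and the resolved date are illustrative only:

    static String resolveSource(IndexNameExpressionResolver resolver) {
        // "<logs-{now/d}>" is date math and resolves to e.g. "logs-2016.09.19";
        // a plain name such as "logs" resolves to itself.
        return resolver.resolveDateMathExpression("<logs-{now/d}>");
    }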
b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 900182ca3ee..bed820189d1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -42,8 +42,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.List; -/** - */ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java index 570ced293d8..3d871db3f4b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class DeleteIndexTemplateAction extends Action { public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 670837e8047..620c432fe98 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -21,9 +21,6 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder { public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java index b4db5e0529f..545601f0108 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java @@ -21,9 +21,6 @@ package org.elasticsearch.action.admin.indices.template.get; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class GetIndexTemplatesAction extends Action { public static final GetIndexTemplatesAction INSTANCE = new GetIndexTemplatesAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 93e02be2ec7..5068f11a0d2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -21,9 +21,6 @@ package org.elasticsearch.action.admin.indices.template.get; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder { public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 672ca1a9080..294550c9a62 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -38,9 +38,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -/** - * - */ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAction { @Inject diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java index 51adc0b5cfc..a442b5463ba 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class PutIndexTemplateAction extends Action { public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 8acc2b3f610..77343277f63 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -27,9 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.util.Map; -/** - * - */ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index cf288e0cc6f..c2c4424d4c8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -43,9 +43,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.List; -/** - * - */ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java index e0318b13b97..98c4ea68d88 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.upgrade.get; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class UpgradeStatusAction extends Action { public static final UpgradeStatusAction INSTANCE = new UpgradeStatusAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java index 98dd1c1828d..cee5bdcabe5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java @@ -22,9 +22,6 @@ package org.elasticsearch.action.admin.indices.upgrade.get; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder { public UpgradeStatusRequestBuilder(ElasticsearchClient client, UpgradeStatusAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java index 550a5b15f66..6929a5ab544 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java @@ -27,9 +27,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public final class ShardUpgradeRequest extends BroadcastShardRequest { private UpgradeRequest request = new UpgradeRequest(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java index 46c51757159..cca5a812c3e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java @@ -28,9 +28,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.text.ParseException; -/** - * - */ class ShardUpgradeResult implements Streamable { private ShardId shardId; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index f467c6ae749..02d58a9db7e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -36,9 +36,6 @@ import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -/** - * - */ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction { private final MetaDataUpdateSettingsService updateSettingsService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java index 5257b50132d..54632261ef4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class UpgradeSettingsAction extends Action { public static final UpgradeSettingsAction INSTANCE = new UpgradeSettingsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index ea145ba15bc..6da503ef828 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.io.stream.Streamable; import java.io.IOException; -/** - * - */ public class QueryExplanation implements Streamable { private String index; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 831ef6e1060..2ccf2f1bd3e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -20,14 +20,15 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.internal.AliasFilter; import java.io.IOException; +import java.util.Objects; /** * Internal validate request executed directly against a specific index shard. 
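The hunk below replaces the request's nullable String[] filteringAliases with a non-null AliasFilter that is always written to and read from the stream, so the hand-rolled array framing and null branches disappear. A minimal sketch of that serialization pattern follows, using hypothetical stand-in types (plain java.io.DataInput/DataOutput in place of Elasticsearch's StreamInput/StreamOutput); it illustrates the null-free delegation idea, not the actual Elasticsearch classes.

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

// Hypothetical stand-ins: AliasFilterSketch plays the role of AliasFilter,
// a nested value object that owns its own wire format.
final class AliasFilterSketch {
    private final String[] aliases;

    AliasFilterSketch(String... aliases) {
        this.aliases = aliases;
    }

    // Deserializing constructor, mirroring the `new AliasFilter(in)` call in the hunk.
    AliasFilterSketch(DataInput in) throws IOException {
        aliases = new String[in.readInt()];
        for (int i = 0; i < aliases.length; i++) {
            aliases[i] = in.readUTF();
        }
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeInt(aliases.length);
        for (String alias : aliases) {
            out.writeUTF(alias);
        }
    }
}

final class ShardRequestSketch {
    // Required at construction time, so readFrom/writeTo need no null branches.
    private final AliasFilterSketch filteringAliases;

    ShardRequestSketch(AliasFilterSketch filteringAliases) {
        this.filteringAliases = Objects.requireNonNull(filteringAliases, "filteringAliases must not be null");
    }

    ShardRequestSketch(DataInput in) throws IOException {
        filteringAliases = new AliasFilterSketch(in); // the nested object reads itself
    }

    void writeTo(DataOutput out) throws IOException {
        filteringAliases.writeTo(out); // one wire format, owned by one class
    }
}
```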
@@ -39,21 +40,18 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { private boolean explain; private boolean rewrite; private long nowInMillis; - - @Nullable - private String[] filteringAliases; + private AliasFilter filteringAliases; public ShardValidateQueryRequest() { - } - ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) { + public ShardValidateQueryRequest(ShardId shardId, AliasFilter filteringAliases, ValidateQueryRequest request) { super(shardId, request); this.query = request.query(); this.types = request.types(); this.explain = request.explain(); this.rewrite = request.rewrite(); - this.filteringAliases = filteringAliases; + this.filteringAliases = Objects.requireNonNull(filteringAliases, "filteringAliases must not be null"); this.nowInMillis = request.nowInMillis; } @@ -69,11 +67,11 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { return this.explain; } - public boolean rewrite() { - return this.rewrite; + public boolean rewrite() { + return this.rewrite; } - public String[] filteringAliases() { + public AliasFilter filteringAliases() { return filteringAliases; } @@ -93,14 +91,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { types[i] = in.readString(); } } - int aliasesSize = in.readVInt(); - if (aliasesSize > 0) { - filteringAliases = new String[aliasesSize]; - for (int i = 0; i < aliasesSize; i++) { - filteringAliases[i] = in.readString(); - } - } - + filteringAliases = new AliasFilter(in); explain = in.readBoolean(); rewrite = in.readBoolean(); nowInMillis = in.readVLong(); @@ -110,20 +101,11 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - out.writeVInt(types.length); for (String type : types) { out.writeString(type); } - if (filteringAliases != null) { - out.writeVInt(filteringAliases.length); - for (String alias : filteringAliases) { - out.writeString(alias); - } - } else { - out.writeVInt(0); - } - + filteringAliases.writeTo(out); out.writeBoolean(explain); out.writeBoolean(rewrite); out.writeVLong(nowInMillis); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 718d3b25e69..b80b721149c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.action.ActionListener; @@ -43,6 +42,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.tasks.Task; @@ -56,9 +56,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReferenceArray; 
-/** - * - */ public class TransportValidateQueryAction extends TransportBroadcastAction { private final SearchService searchService; @@ -80,8 +77,9 @@ public class TransportValidateQueryAction extends TransportBroadcastAction SearchContext.removeCurrent()); + Releasables.close(searchContext); } return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java index fdec5490c97..51791062c83 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ValidateQueryAction extends Action { public static final ValidateQueryAction INSTANCE = new ValidateQueryAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index bfee7ec6b99..8e377968980 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -23,9 +23,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuild import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.index.query.QueryBuilder; -/** - * - */ public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder { public ValidateQueryRequestBuilder(ElasticsearchClient client, ValidateQueryAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkAction.java index e442f61061a..d4aba69c343 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkAction.java @@ -24,8 +24,6 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportRequestOptions; -/** - */ public class BulkAction extends Action { public static final BulkAction INSTANCE = new BulkAction(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 760c5781aea..987aa36585b 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -19,24 +19,17 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import 
java.io.IOException; -/** - * - */ public class BulkItemRequest implements Streamable { private int id; - private ActionRequest request; + private DocWriteRequest request; private volatile BulkItemResponse primaryResponse; private volatile boolean ignoreOnReplica; @@ -44,8 +37,7 @@ public class BulkItemRequest implements Streamable { } - public BulkItemRequest(int id, ActionRequest request) { - assert request instanceof IndicesRequest; + public BulkItemRequest(int id, DocWriteRequest request) { this.id = id; this.request = request; } @@ -54,14 +46,13 @@ public class BulkItemRequest implements Streamable { return id; } - public ActionRequest request() { + public DocWriteRequest request() { return request; } public String index() { - IndicesRequest indicesRequest = (IndicesRequest) request; - assert indicesRequest.indices().length == 1; - return indicesRequest.indices()[0]; + assert request.indices().length == 1; + return request.indices()[0]; } BulkItemResponse getPrimaryResponse() { @@ -92,15 +83,7 @@ public class BulkItemRequest implements Streamable { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - byte type = in.readByte(); - if (type == 0) { - request = new IndexRequest(); - } else if (type == 1) { - request = new DeleteRequest(); - } else if (type == 2) { - request = new UpdateRequest(); - } - request.readFrom(in); + request = DocWriteRequest.readDocumentRequest(in); if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } @@ -110,14 +93,7 @@ public class BulkItemRequest implements Streamable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - if (request instanceof IndexRequest) { - out.writeByte((byte) 0); - } else if (request instanceof DeleteRequest) { - out.writeByte((byte) 1); - } else if (request instanceof UpdateRequest) { - out.writeByte((byte) 2); - } - request.writeTo(out); + DocWriteRequest.writeDocumentRequest(out, request); out.writeOptionalStreamable(primaryResponse); out.writeBoolean(ignoreOnReplica); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index ad45ace84c9..2a1c3a1e35a 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -21,7 +21,9 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; @@ -50,7 +52,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(opType); + builder.startObject(opType.getLowercase()); if (failure == null) { response.toXContent(builder, params); builder.field(Fields.STATUS, response.status().getStatus()); @@ -183,7 +185,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { private int id; - private String opType; + private OpType opType; private DocWriteResponse response; @@ -193,13 +195,13 @@ public class BulkItemResponse implements Streamable, StatusToXContent { } - 
public BulkItemResponse(int id, String opType, DocWriteResponse response) { + public BulkItemResponse(int id, OpType opType, DocWriteResponse response) { this.id = id; - this.opType = opType; this.response = response; + this.response = response; + this.opType = opType; } - public BulkItemResponse(int id, String opType, Failure failure) { + public BulkItemResponse(int id, OpType opType, Failure failure) { this.id = id; this.opType = opType; this.failure = failure; @@ -215,7 +217,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { /** * The operation type ("index", "create", "update" or "delete"). */ - public String getOpType() { + public OpType getOpType() { return this.opType; } @@ -300,7 +302,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void readFrom(StreamInput in) throws IOException { id = in.readVInt(); - opType = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { + opType = OpType.fromId(in.readByte()); + } else { + opType = OpType.fromString(in.readString()); + } byte type = in.readByte(); if (type == 0) { @@ -322,7 +328,11 @@ public class BulkItemResponse implements Streamable, StatusToXContent { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - out.writeString(opType); + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { + out.writeByte(opType.getId()); + } else { + out.writeString(opType.getLowercase()); + } if (response == null) { out.writeByte((byte) 2); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index db3cd15e9c4..6dacb21b239 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; @@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable { * (for example, if no id is provided, one will be generated, or usage of the create flag). */ public BulkProcessor add(IndexRequest request) { - return add((ActionRequest) request); + return add((DocWriteRequest) request); } /** * Adds a {@link DeleteRequest} to the list of actions to execute. */ public BulkProcessor add(DeleteRequest request) { - return add((ActionRequest) request); + return add((DocWriteRequest) request); } /** * Adds either a delete or an index request. 
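In the BulkProcessor hunks here, the generic add() entry point narrows from ActionRequest to DocWriteRequest, so index and delete items flow through a single typed path. A minimal usage sketch against that API follows; the client wiring, index name, type, ids, and JSON bodies are illustrative assumptions, and the builder/listener calls reflect the 5.x-era client rather than anything guaranteed by this diff.

```java
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;

public class BulkProcessorSketch {
    public static BulkProcessor start(Client client) { // `client` is assumed to be already configured
        BulkProcessor processor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) { }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
        }).setBulkActions(1000).build();

        // Both typed overloads now delegate to add(DocWriteRequest), so index
        // and delete items travel through one code path instead of being cast
        // through ActionRequest.
        processor.add(new IndexRequest("twitter", "tweet", "1").source("{\"msg\":\"hello\"}"));
        processor.add(new DeleteRequest("twitter", "tweet", "2"));
        return processor;
    }
}
```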
*/ - public BulkProcessor add(ActionRequest request) { + public BulkProcessor add(DocWriteRequest request) { return add(request, null); } - public BulkProcessor add(ActionRequest request, @Nullable Object payload) { + public BulkProcessor add(DocWriteRequest request, @Nullable Object payload) { internalAdd(request, payload); return this; } @@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocWriteRequest request, @Nullable Object payload) { ensureOpen(); bulkRequest.add(request, payload); executeIfNeeded(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index f766c2897fb..ee4f96d484a 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -49,6 +50,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -70,7 +72,7 @@ public class BulkRequest extends ActionRequest implements Composite * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare * the one with the least casts. */ - final List> requests = new ArrayList<>(); + final List requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; @@ -85,14 +87,14 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. */ - public BulkRequest add(ActionRequest... requests) { - for (ActionRequest request : requests) { + public BulkRequest add(DocWriteRequest... requests) { + for (DocWriteRequest request : requests) { add(request, null); } return this; } - public BulkRequest add(ActionRequest request) { + public BulkRequest add(DocWriteRequest request) { return add(request, null); } @@ -102,7 +104,7 @@ public class BulkRequest extends ActionRequest implements Composite * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(ActionRequest request, @Nullable Object payload) { + public BulkRequest add(DocWriteRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); } else if (request instanceof DeleteRequest) { @@ -118,8 +120,8 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a list of requests to be executed. Either index or delete requests. 
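BulkRequest likewise gains add(DocWriteRequest...) and stores every item behind the common interface, so callers can mix index, update and delete requests and inspect them through opType() instead of instanceof chains (the readFrom/writeTo hunks further down make the same move via DocWriteRequest.readDocumentRequest and writeDocumentRequest). A hedged sketch of that usage follows; the index name, type, ids, and the source(String)/doc(String) overloads are era-appropriate assumptions, not part of this diff.

```java
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;

public class MixedBulkSketch {
    public static BulkRequest mixedBulk() {
        BulkRequest bulk = new BulkRequest();
        // Index, update and delete requests all implement DocWriteRequest,
        // so the varargs overload accepts the heterogeneous mix directly.
        bulk.add(new IndexRequest("twitter", "tweet", "1").source("{\"msg\":\"hi\"}"),
                 new UpdateRequest("twitter", "tweet", "1").doc("{\"msg\":\"edit\"}"),
                 new DeleteRequest("twitter", "tweet", "2"));

        for (DocWriteRequest request : bulk.requests()) {
            // The common interface exposes index(), type(), id() and opType(),
            // so no instanceof chain is needed to inspect the items.
            System.out.println(request.opType().getLowercase() + " -> " + request.index());
        }
        return bulk;
    }
}
```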
*/ - public BulkRequest add(Iterable> requests) { - for (ActionRequest request : requests) { + public BulkRequest add(Iterable requests) { + for (DocWriteRequest request : requests) { add(request); } return this; @@ -168,7 +170,7 @@ public class BulkRequest extends ActionRequest implements Composite sizeInBytes += request.upsertRequest().source().length(); } if (request.script() != null) { - sizeInBytes += request.script().getScript().length() * 2; + sizeInBytes += request.script().getIdOrCode().length() * 2; } return this; } @@ -205,18 +207,13 @@ public class BulkRequest extends ActionRequest implements Composite /** * The list of requests in this bulk request. */ - public List> requests() { + public List requests() { return this.requests; } @Override public List subRequests() { - List indicesRequests = new ArrayList<>(); - for (ActionRequest request : requests) { - assert request instanceof IndicesRequest; - indicesRequests.add((IndicesRequest) request); - } - return indicesRequests; + return requests.stream().collect(Collectors.toList()); } /** @@ -247,32 +244,32 @@ public class BulkRequest extends ActionRequest implements Composite /** * Adds a framed data in binary format */ - public BulkRequest add(byte[] data, int from, int length) throws Exception { + public BulkRequest add(byte[] data, int from, int length) throws IOException { return add(data, from, length, null, null); } /** * Adds a framed data in binary format */ - public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception { + public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException { return add(new BytesArray(data, from, length), defaultIndex, defaultType); } /** * Adds a framed data in binary format */ - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception { + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws IOException { return add(data, defaultIndex, defaultType, null, null, null, null, null, true); } /** * Adds a framed data in binary format */ - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception { + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws IOException { return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex); } - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception { + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws IOException { XContent xContent = XContentFactory.xContent(data); int line = 0; int from = 0; @@ -511,7 +508,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an 
ingest pipeline enabled. */ public boolean hasIndexRequestsWithPipelines() { - for (ActionRequest actionRequest : requests) { + for (DocWriteRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -529,13 +526,13 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (ActionRequest request : requests) { + for (DocWriteRequest request : requests) { // We first check if refresh has been set if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { validationException = addValidationError( "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException); } - ActionRequestValidationException ex = request.validate(); + ActionRequestValidationException ex = ((WriteRequest) request).validate(); if (ex != null) { if (validationException == null) { validationException = new ActionRequestValidationException(); @@ -553,20 +550,7 @@ public class BulkRequest extends ActionRequest implements Composite waitForActiveShards = ActiveShardCount.readFrom(in); int size = in.readVInt(); for (int i = 0; i < size; i++) { - byte type = in.readByte(); - if (type == 0) { - IndexRequest request = new IndexRequest(); - request.readFrom(in); - requests.add(request); - } else if (type == 1) { - DeleteRequest request = new DeleteRequest(); - request.readFrom(in); - requests.add(request); - } else if (type == 2) { - UpdateRequest request = new UpdateRequest(); - request.readFrom(in); - requests.add(request); - } + requests.add(DocWriteRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); timeout = new TimeValue(in); @@ -577,15 +561,8 @@ public class BulkRequest extends ActionRequest implements Composite super.writeTo(out); waitForActiveShards.writeTo(out); out.writeVInt(requests.size()); - for (ActionRequest request : requests) { - if (request instanceof IndexRequest) { - out.writeByte((byte) 0); - } else if (request instanceof DeleteRequest) { - out.writeByte((byte) 1); - } else if (request instanceof UpdateRequest) { - out.writeByte((byte) 2); - } - request.writeTo(out); + for (DocWriteRequest request : requests) { + DocWriteRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); timeout.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index ffc2407b8a4..25366d034ca 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -29,9 +29,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -/** - * - */ public class BulkShardRequest extends ReplicatedWriteRequest { private BulkItemRequest[] items; @@ -45,7 +42,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest { setRefreshPolicy(refreshPolicy); } - BulkItemRequest[] items() { + public BulkItemRequest[] items() { return items; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 22260181bb1..b51ce624800 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ 
b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -28,9 +28,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class BulkShardResponse extends ReplicationResponse implements WriteResponse { private ShardId shardId; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index da080b54b25..854b2fcf892 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -19,12 +19,10 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -58,18 +56,18 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; +import java.util.stream.Collectors; /** - * + * Groups bulk request items by shard, optionally creating non-existent indices and + * delegates to {@link TransportShardBulkAction} for shard-level bulk execution */ public class TransportBulkAction extends HandledTransportAction { @@ -119,15 +117,9 @@ public class TransportBulkAction extends HandledTransportAction autoCreateIndices = new HashSet<>(); - for (ActionRequest request : bulkRequest.requests) { - if (request instanceof DocumentRequest) { - DocumentRequest req = (DocumentRequest) request; - autoCreateIndices.add(req.index()); - } else { - throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); - } - } + final Set autoCreateIndices = bulkRequest.requests.stream() + .map(DocWriteRequest::index) + .collect(Collectors.toSet()); final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); ClusterState state = clusterService.state(); for (String index : autoCreateIndices) { @@ -153,7 +145,7 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, ActionRequest request, String index, Exception e) { - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - if (index.equals(indexRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "index", new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e))); - return true; - } - } else if (request instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) request; - if (index.equals(deleteRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "delete", new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), e))); - return true; - } - } else if (request instanceof UpdateRequest) { - UpdateRequest updateRequest = 
(UpdateRequest) request; - if (index.equals(updateRequest.index())) { - responses.set(idx, new BulkItemResponse(idx, "update", new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), e))); - return true; - } - } else { - throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocWriteRequest request, String index, Exception e) { + if (index.equals(request.index())) { + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); + return true; } return false; } @@ -236,95 +211,56 @@ public class TransportBulkAction extends HandledTransportAction Operations mapping Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - ActionRequest request = bulkRequest.requests.get(i); - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.id(), indexRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); - } else if (request instanceof DeleteRequest) { - DeleteRequest deleteRequest = (DeleteRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.id(), deleteRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); - } else if (request instanceof UpdateRequest) { - UpdateRequest updateRequest = (UpdateRequest) request; - String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.id(), updateRequest.routing()).shardId(); - List list = requestsByShard.get(shardId); - if (list == null) { - list = new ArrayList<>(); - requestsByShard.put(shardId, list); - } - list.add(new BulkItemRequest(i, request)); + DocWriteRequest request = bulkRequest.requests.get(i); + if (request == null) { + continue; } + String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); + ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); + List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); + shardRequests.add(new BulkItemRequest(i, request)); } if (requestsByShard.isEmpty()) { @@ -364,19 +300,9 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, - final ConcreteIndices concreteIndices, - final MetaData metaData) { + private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, BulkRequest bulkRequest, AtomicArray responses, int idx, + final ConcreteIndices concreteIndices, + final MetaData metaData) { Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; 
if (concreteIndex == null) { @@ -413,15 +339,7 @@ public class TransportBulkAction extends HandledTransportAction { - - private static final String OP_TYPE_UPDATE = "update"; - private static final String OP_TYPE_DELETE = "delete"; +/** Performs shard-level bulk (index, delete or update) operations */ +public class TransportShardBulkAction extends TransportWriteAction { public static final String ACTION_NAME = BulkAction.NAME + "[s]"; @@ -88,7 +83,7 @@ public class TransportShardBulkAction extends TransportWriteAction onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { - ShardId shardId = request.shardId(); - final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); + protected WritePrimaryResult shardOperationOnPrimary(BulkShardRequest request, IndexShard primary) throws Exception { + final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; VersionType[] preVersionTypes = new VersionType[request.items().length]; Translog.Location location = null; for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) { - BulkItemRequest item = request.items()[requestIndex]; - location = handleItem(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); + location = executeBulkItemRequest(metaData, primary, request, preVersions, preVersionTypes, location, requestIndex); } BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; @@ -129,37 +121,101 @@ public class TransportShardBulkAction extends TransportWriteAction(response, location); + return new WritePrimaryResult(request, response, location, null, primary); } - private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - if (item.request() instanceof IndexRequest) { - location = index(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - } else if (item.request() instanceof DeleteRequest) { - location = delete(request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - } else if (item.request() instanceof UpdateRequest) { - Tuple tuple = update(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item); - location = tuple.v1(); - item = tuple.v2(); - } else { - throw new IllegalStateException("Unexpected index operation: " + item.request()); - } - - assert item.getPrimaryResponse() != null; - assert preVersionTypes[requestIndex] != null; - return location; - } - - private Translog.Location index(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - IndexRequest indexRequest = (IndexRequest) item.request(); - preVersions[requestIndex] = indexRequest.version(); - preVersionTypes[requestIndex] = indexRequest.versionType(); + /** Executes bulk item requests and handles request execution exceptions */ + private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary, + BulkShardRequest request, + long[] preVersions, VersionType[] preVersionTypes, + Translog.Location location, int requestIndex) throws Exception { + final 
DocWriteRequest itemRequest = request.items()[requestIndex].request(); + preVersions[requestIndex] = itemRequest.version(); + preVersionTypes[requestIndex] = itemRequest.versionType(); + DocWriteRequest.OpType opType = itemRequest.opType(); try { - WriteResult result = shardIndexOperation(request, indexRequest, metaData, indexShard, true); - location = locationToSync(location, result.getLocation()); - // add the response - IndexResponse indexResponse = result.getResponse(); - setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse)); + // execute item request + final Engine.Result operationResult; + final DocWriteResponse response; + final BulkItemRequest replicaRequest; + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + final IndexRequest indexRequest = (IndexRequest) itemRequest; + Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); + if (indexResult.hasFailure()) { + response = null; + } else { + // update the version on request so it will happen on the replicas + final long version = indexResult.getVersion(); + indexRequest.version(version); + indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); + assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); + response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(), + indexResult.getVersion(), indexResult.isCreated()); + } + operationResult = indexResult; + replicaRequest = request.items()[requestIndex]; + break; + case UPDATE: + UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest), + primary, metaData, request, requestIndex); + operationResult = updateResultHolder.operationResult; + response = updateResultHolder.response; + replicaRequest = updateResultHolder.replicaRequest; + break; + case DELETE: + final DeleteRequest deleteRequest = (DeleteRequest) itemRequest; + Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary); + if (deleteResult.hasFailure()) { + response = null; + } else { + // update the request with the version so it will go to the replicas + deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery()); + deleteRequest.version(deleteResult.getVersion()); + assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version()); + response = new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(), + deleteResult.getVersion(), deleteResult.isFound()); + } + operationResult = deleteResult; + replicaRequest = request.items()[requestIndex]; + break; + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); + } + // update the bulk item request because update request execution can mutate the bulk item request + request.items()[requestIndex] = replicaRequest; + if (operationResult == null) { // in case of noop update operation + assert response.getResult() == DocWriteResponse.Result.NOOP + : "only noop update can have null operation"; + replicaRequest.setIgnoreOnReplica(); + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response)); + } else if (operationResult.hasFailure() == false) { + location = locationToSync(location, operationResult.getTranslogLocation()); + BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); + 
replicaRequest.setPrimaryResponse(primaryResponse); + // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. + primaryResponse.getResponse().setShardInfo(new ShardInfo()); + } else { + DocWriteRequest docWriteRequest = replicaRequest.request(); + Exception failure = operationResult.getFailure(); + if (isConflictException(failure)) { + logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); + } else { + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); + } + // if its a conflict failure, and we already executed the request on a primary (and we execute it + // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) + // then just use the response we got from the successful execution + if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { + replicaRequest.setIgnoreOnReplica(); + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); + } + } + assert replicaRequest.getPrimaryResponse() != null; + assert preVersionTypes[requestIndex] != null; } catch (Exception e) { // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. // some operations were already perform and have a seqno assigned. we shouldn't just reindex them @@ -167,301 +223,173 @@ public class TransportShardBulkAction extends TransportWriteAction> void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest request) { - if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t); - } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", shardId, operation, request), t); + private static class UpdateResultHolder { + final BulkItemRequest replicaRequest; + final Engine.Result operationResult; + final DocWriteResponse response; + + private UpdateResultHolder(BulkItemRequest replicaRequest, Engine.Result operationResult, + DocWriteResponse response) { + this.replicaRequest = replicaRequest; + this.operationResult = operationResult; + this.response = response; } } - private Translog.Location delete(BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - DeleteRequest deleteRequest = (DeleteRequest) item.request(); - preVersions[requestIndex] = deleteRequest.version(); - preVersionTypes[requestIndex] = deleteRequest.versionType(); - - try { - // add the response - final WriteResult writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = writeResult.getResponse(); - location = locationToSync(location, writeResult.getLocation()); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); - } catch (Exception e) { - // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. 
- // some operations were already perform and have a seqno assigned. we shouldn't just reindex them - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... - for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - logFailure(e, "delete", request.shardId(), deleteRequest); - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); - } - } - return location; - } - - private Tuple update(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { - UpdateRequest updateRequest = (UpdateRequest) item.request(); - preVersions[requestIndex] = updateRequest.version(); - preVersionTypes[requestIndex] = updateRequest.versionType(); - // We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE - for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) { - UpdateResult updateResult; + /** + * Executes update request, delegating to an index or delete operation after translation, + * handles retries on version conflict and constructs update response + * NOTE: reassigns bulk item request at requestIndex for replicas to + * execute translated update request (NOOP update is an exception). 
NOOP updates are + * indicated by returning a null operation in {@link UpdateResultHolder} + * */ + private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary, + IndexMetaData metaData, BulkShardRequest request, + int requestIndex) throws Exception { + Engine.Result updateOperationResult = null; + UpdateResponse updateResponse = null; + BulkItemRequest replicaRequest = request.items()[requestIndex]; + int maxAttempts = updateRequest.retryOnConflict(); + for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { + final UpdateHelper.Result translate; + // translate update request try { - updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard); - } catch (Exception t) { - updateResult = new UpdateResult(null, null, false, t, null); + translate = updateHelper.prepare(updateRequest, primary, threadPool::estimatedTimeInMillis); + } catch (Exception failure) { + // we may fail translating an update to index or delete operation + // we use index result to communicate failure while translating update request + updateOperationResult = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); + break; // out of retry loop } - if (updateResult.success()) { - if (updateResult.writeResult != null) { - location = locationToSync(location, updateResult.writeResult.getLocation()); - } - switch (updateResult.result.getResponseResult()) { - case CREATED: - case UPDATED: - @SuppressWarnings("unchecked") - WriteResult result = updateResult.writeResult; - IndexRequest indexRequest = updateResult.request(); - BytesReference indexSourceAsBytes = indexRequest.source(); - // add the response - IndexResponse indexResponse = result.getResponse(); - UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getVersion(), indexResponse.getResult()); + // execute translated update request switch (translate.getResponseResult()) { + case CREATED: + case UPDATED: + IndexRequest indexRequest = translate.action(); + MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + indexRequest.process(mappingMd, allowIdGeneration, request.index()); + updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); + if (updateOperationResult.hasFailure() == false) { + // update the version on request so it will happen on the replicas + final long version = updateOperationResult.getVersion(); + indexRequest.version(version); + indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); + assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); + } + break; + case DELETED: + DeleteRequest deleteRequest = translate.action(); + updateOperationResult = executeDeleteRequestOnPrimary(deleteRequest, primary); + if (updateOperationResult.hasFailure() == false) { + // update the request with the version so it will go to the replicas + deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery()); + deleteRequest.version(updateOperationResult.getVersion()); + assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version()); + } + break; + case NOOP: + primary.noopUpdate(updateRequest.type()); + break; + default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + } + if 
(updateOperationResult == null) { + // this is a noop operation + updateResponse = translate.action(); + break; // out of retry loop + } else if (updateOperationResult.hasFailure() == false) { + // enrich update response and + // set translated update (index/delete) request for replica execution in bulk items + switch (updateOperationResult.getOperationType()) { + case INDEX: + IndexRequest updateIndexRequest = translate.action(); + final IndexResponse indexResponse = new IndexResponse(primary.shardId(), + updateIndexRequest.type(), updateIndexRequest.id(), updateOperationResult.getSeqNo(), + updateOperationResult.getVersion(), ((Engine.IndexResult) updateOperationResult).isCreated()); + BytesReference indexSourceAsBytes = updateIndexRequest.source(); + updateResponse = new UpdateResponse(indexResponse.getShardInfo(), + indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), + indexResponse.getVersion(), indexResponse.getResult()); if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || (updateRequest.fields() != null && updateRequest.fields().length > 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + Tuple> sourceAndContent = + XContentHelper.convertToMap(indexSourceAsBytes, true); + updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), + indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); } - item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse)); + // set translated request as replica request + replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateIndexRequest); break; - case DELETED: - @SuppressWarnings("unchecked") - WriteResult writeResult = updateResult.writeResult; - DeleteResponse response = writeResult.getResponse(); - DeleteRequest deleteRequest = updateResult.request(); - updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); - // Replace the update request to the translated delete request to execute on the replica. 
- item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse)); + case DELETE: + DeleteRequest updateDeleteRequest = translate.action(); + DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), + updateDeleteRequest.type(), updateDeleteRequest.id(), updateOperationResult.getSeqNo(), + updateOperationResult.getVersion(), ((Engine.DeleteResult) updateOperationResult).isFound()); + updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), + deleteResponse.getShardId(), deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getSeqNo(), + deleteResponse.getVersion(), deleteResponse.getResult()); + updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, + request.index(), deleteResponse.getVersion(), translate.updatedSourceAsMap(), + translate.updateSourceContentType(), null)); + // set translated request as replica request + replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest); break; - case NOOP: - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult)); - item.setIgnoreOnReplica(); // no need to go to the replica - break; - default: - throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult()); } - // NOTE: Breaking out of the retry_on_conflict loop! - break; - } else if (updateResult.failure()) { - Throwable e = updateResult.error; - if (updateResult.retry) { - // updateAttemptCount is 0 based and marks current attempt, if it's equal to retryOnConflict we are going out of the iteration - if (updateAttemptsCount >= updateRequest.retryOnConflict()) { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, - new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); - } - } else { - // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. - // some operations were already perform and have a seqno assigned. we shouldn't just reindex them - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... 
- for (int j = 0; j < requestIndex; j++) { - applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); - } - throw (ElasticsearchException) e; - } - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else if (updateResult.result == null) { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); - } else { - switch (updateResult.result.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = updateResult.request(); - logFailure(e, "index", request.shardId(), indexRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, - new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); - break; - case DELETED: - DeleteRequest deleteRequest = updateResult.request(); - logFailure(e, "delete", request.shardId(), deleteRequest); - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); - break; - default: - throw new IllegalStateException("Illegal operation " + updateResult.result.getResponseResult()); - } - } - // NOTE: Breaking out of the retry_on_conflict loop! - break; - } - + // successful operation + break; // out of retry loop + } else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) { + // not a version conflict exception + break; // out of retry loop } } - return Tuple.tuple(location, item); - } - - private void setResponse(BulkItemRequest request, BulkItemResponse response) { - request.setPrimaryResponse(response); - if (response.isFailed()) { - request.setIgnoreOnReplica(); - } else { - // Set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. 
- response.getResponse().setShardInfo(new ShardInfo()); - } - } - - private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, - IndexShard indexShard, boolean processed) throws Exception { - - MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); - if (!processed) { - indexRequest.process(mappingMd, allowIdGeneration, request.index()); - } - return TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); - } - - static class UpdateResult { - - final UpdateHelper.Result result; - final ActionRequest actionRequest; - final boolean retry; - final Throwable error; - final WriteResult writeResult; - final UpdateResponse noopResult; - - UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) { - this.result = result; - this.actionRequest = actionRequest; - this.retry = retry; - this.error = error; - this.writeResult = writeResult; - this.noopResult = null; - } - - UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) { - this.result = result; - this.actionRequest = actionRequest; - this.writeResult = writeResult; - this.retry = false; - this.error = null; - this.noopResult = null; - } - - public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) { - this.result = result; - this.noopResult = updateResponse; - this.actionRequest = null; - this.writeResult = null; - this.retry = false; - this.error = null; - } - - - boolean failure() { - return error != null; - } - - boolean success() { - return noopResult != null || writeResult != null; - } - - @SuppressWarnings("unchecked") - T request() { - return (T) actionRequest; - } - - - } - - private UpdateResult shardUpdateOperation(IndexMetaData metaData, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) { - UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard); - switch (translate.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = translate.action(); - try { - WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, metaData, indexShard, false); - return new UpdateResult(translate, indexRequest, result); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - boolean retry = false; - if (cause instanceof VersionConflictEngineException) { - retry = true; - } - return new UpdateResult(translate, indexRequest, retry, cause, null); - } - case DELETED: - DeleteRequest deleteRequest = translate.action(); - try { - WriteResult result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - return new UpdateResult(translate, deleteRequest, result); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - boolean retry = false; - if (cause instanceof VersionConflictEngineException) { - retry = true; - } - return new UpdateResult(translate, deleteRequest, retry, cause, null); - } - case NOOP: - UpdateResponse updateResponse = translate.action(); - indexShard.noopUpdate(updateRequest.type()); - return new UpdateResult(translate, updateResponse); - default: - throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); - } + return new UpdateResultHolder(replicaRequest, updateOperationResult, updateResponse); } @Override - protected Location onReplicaShard(BulkShardRequest request, 
IndexShard indexShard) { + protected WriteReplicaResult shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; - if (item == null || item.isIgnoreOnReplica()) { - continue; - } - if (item.request() instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) item.request(); + if (item.isIgnoreOnReplica() == false) { + DocWriteRequest docWriteRequest = item.request(); + final Engine.Result operationResult; try { - Engine.Index operation = TransportIndexAction.executeIndexRequestOnReplica(indexRequest, indexShard); - location = locationToSync(location, operation.getTranslogLocation()); + switch (docWriteRequest.opType()) { + case CREATE: + case INDEX: + operationResult = executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), replica); + break; + case DELETE: + operationResult = executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), replica); + break; + default: + throw new IllegalStateException("Unexpected request operation type on replica: " + + docWriteRequest.opType().getLowercase()); + } + if (operationResult.hasFailure()) { + // check if any transient write operation failures should be bubbled up + Exception failure = operationResult.getFailure(); + assert failure instanceof VersionConflictEngineException + || failure instanceof MapperParsingException + || failure instanceof EngineClosedException + || failure instanceof IndexShardClosedException + : "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" + + " failures. got " + failure; + if (!ignoreReplicaException(failure)) { + throw failure; + } + } else { + location = locationToSync(location, operationResult.getTranslogLocation()); + } } catch (Exception e) { // if its not an ignore replica failure, we need to make sure to bubble up the failure // so we will fail the shard @@ -469,36 +397,9 @@ public class TransportShardBulkAction extends TransportWriteAction { public static final DeleteAction INSTANCE = new DeleteAction(); diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index bdf09e3e532..280324227cc 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.delete; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicatedWriteRequest implements DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest { private String type; private String id; @@ -90,6 +90,9 @@ public class DeleteRequest extends ReplicatedWriteRequest impleme if (!versionType.validateVersionForWrites(version)) { validationException = addValidationError("illegal version value [" + version + "] for version 
type [" + versionType.name() + "]", validationException); } + if (versionType == VersionType.FORCE) { + validationException = addValidationError("version type [force] may no longer be used", validationException); + } return validationException; } @@ -164,28 +167,33 @@ public class DeleteRequest extends ReplicatedWriteRequest impleme return this.routing; } - /** - * Sets the version, which will cause the delete operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public DeleteRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } + @Override public DeleteRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } + @Override + public OpType opType() { + return OpType.DELETE; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 5bf618543f4..7dbdb95c570 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -49,7 +48,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the delete operation. 
*/ -public class TransportDeleteAction extends TransportWriteAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -61,7 +60,7 @@ public class TransportDeleteAction extends TransportWriteAction listener) { ClusterState state = clusterService.state(); if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { - createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener() { + CreateIndexRequest createIndexRequest = new CreateIndexRequest() + .index(request.index()) + .cause("auto(delete api)") + .masterNodeTimeout(request.timeout()); + createIndexAction.execute(task, createIndexRequest, new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(task, request, listener); @@ -119,31 +122,43 @@ public class TransportDeleteAction extends TransportWriteAction onPrimaryShard(DeleteRequest request, IndexShard indexShard) { - return executeDeleteRequestOnPrimary(request, indexShard); + protected WritePrimaryResult shardOperationOnPrimary(DeleteRequest request, IndexShard primary) throws Exception { + final Engine.DeleteResult result = executeDeleteRequestOnPrimary(request, primary); + final DeleteResponse response; + if (result.hasFailure() == false) { + // update the request with the version so it will go to the replicas + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + request.version(result.getVersion()); + assert request.versionType().validateVersionForWrites(request.version()); + response = new DeleteResponse( + primary.shardId(), + request.type(), + request.id(), + result.getSeqNo(), + result.getVersion(), + result.isFound()); + } else { + response = null; + } + return new WritePrimaryResult(request, response, result.getTranslogLocation(), result.getFailure(), primary); } @Override - protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { - return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); + protected WriteReplicaResult shardOperationOnReplica(DeleteRequest request, IndexShard replica) throws Exception { + final Engine.DeleteResult result = executeDeleteRequestOnReplica(request, replica); + return new WriteReplicaResult(request, result.getTranslogLocation(), result.getFailure(), replica); } - public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { - Engine.Delete delete = indexShard.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); - indexShard.delete(delete); - // update the request with the version so it will go to the replicas - request.versionType(delete.versionType().versionTypeForReplicationAndRecovery()); - request.version(delete.version()); - request.seqNo(delete.seqNo()); - assert request.versionType().validateVersionForWrites(request.version()); - DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.seqNo(), delete.version(), delete.found()); - return new WriteResult<>(response, delete.getTranslogLocation()); + public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) { + final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); + return 
primary.delete(delete); } - public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { - Engine.Delete delete = indexShard.prepareDeleteOnReplica(request.type(), request.id(), request.seqNo(), request.version(), request.versionType()); - indexShard.delete(delete); - return delete; + public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) { + final Engine.Delete delete = + replica.prepareDeleteOnReplica(request.type(), request.id(), request.seqNo(), request.version(), request.versionType()); + return replica.delete(delete); } + } diff --git a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index fef1b307e99..5d8ca27657f 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.internal.AliasFilter; import java.io.IOException; @@ -43,7 +44,7 @@ public class ExplainRequest extends SingleShardRequest { private String[] storedFields; private FetchSourceContext fetchSourceContext; - private String[] filteringAlias = Strings.EMPTY_ARRAY; + private AliasFilter filteringAlias = new AliasFilter(null, Strings.EMPTY_ARRAY); long nowInMillis; @@ -131,11 +132,11 @@ public class ExplainRequest extends SingleShardRequest { return this; } - public String[] filteringAlias() { + public AliasFilter filteringAlias() { return filteringAlias; } - public ExplainRequest filteringAlias(String[] filteringAlias) { + public ExplainRequest filteringAlias(AliasFilter filteringAlias) { if (filteringAlias != null) { this.filteringAlias = filteringAlias; } @@ -166,7 +167,7 @@ public class ExplainRequest extends SingleShardRequest { routing = in.readOptionalString(); preference = in.readOptionalString(); query = in.readNamedWriteable(QueryBuilder.class); - filteringAlias = in.readStringArray(); + filteringAlias = new AliasFilter(in); storedFields = in.readOptionalStringArray(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); nowInMillis = in.readVLong(); @@ -180,7 +181,7 @@ public class ExplainRequest extends SingleShardRequest { out.writeOptionalString(routing); out.writeOptionalString(preference); out.writeNamedWriteable(query); - out.writeStringArray(filteringAlias); + filteringAlias.writeTo(out); out.writeOptionalStringArray(storedFields); out.writeOptionalWriteable(fetchSourceContext); out.writeVLong(nowInMillis); diff --git a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java index cf7b4821816..d2d9bb3b820 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java @@ -99,12 +99,9 @@ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder SearchContext.removeCurrent()); + Releasables.close(result, context); } } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java 
index 085952c9be6..2c51e918497 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.fieldstats; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class FieldStatsAction extends Action { public static final FieldStatsAction INSTANCE = new FieldStatsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java index d0b40374d6b..4137c4cd1c2 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java @@ -35,8 +35,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -/** - */ public class FieldStatsRequest extends BroadcastRequest { public static final String DEFAULT_LEVEL = "cluster"; diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java index 1a3a8070e46..7f9a744df83 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.fieldstats; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class FieldStatsRequestBuilder extends BroadcastOperationRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java index 14e2f13d4ff..10928dc9a78 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java @@ -30,8 +30,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -/** - */ public class FieldStatsResponse extends BroadcastResponse { private Map> indicesMergedFieldStats; private Map conflicts; diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java index 85a0d469541..3844895bc24 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java @@ -29,8 +29,6 @@ import java.util.Arrays; import java.util.HashSet; import java.util.Set; -/** - */ public class FieldStatsShardRequest extends BroadcastShardRequest { private String[] fields; diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java index 7cc298729f0..d94cfcd2958 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java @@ -28,8 +28,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -/** - */ public class FieldStatsShardResponse extends BroadcastShardResponse { 
private Map> fieldStats; diff --git a/core/src/main/java/org/elasticsearch/action/get/GetAction.java b/core/src/main/java/org/elasticsearch/action/get/GetAction.java index eb499ffb8c2..69c67b1be2b 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class GetAction extends Action { public static final GetAction INSTANCE = new GetAction(); diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java index 38dd10df963..93045182f4c 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -101,6 +101,9 @@ public class GetRequest extends SingleShardRequest implements Realti validationException = ValidateActions.addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } + if (versionType == VersionType.FORCE) { + validationException = ValidateActions.addValidationError("version type [force] may no longer be used", validationException); + } return validationException; } diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java index f56905d86ee..973b130bedb 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java @@ -99,12 +99,8 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder { public static final MultiGetAction INSTANCE = new MultiGetAction(); diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 1decd8ce946..1308d56acaf 100644 --- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -335,7 +335,7 @@ public class MultiGetRequest extends ActionRequest implements I return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, null, data, allowExplicitIndex); } - public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, BytesReference data, boolean allowExplicitIndex) throws Exception { + public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, BytesReference data, boolean allowExplicitIndex) throws IOException { try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) { XContentParser.Token token; String currentFieldName = null; @@ -370,7 +370,7 @@ public class MultiGetRequest extends ActionRequest implements I long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; - FetchSourceContext fetchSourceContext = null; + FetchSourceContext fetchSourceContext = FetchSourceContext.FETCH_SOURCE; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -401,9 +401,11 @@ 
public class MultiGetRequest extends ActionRequest implements I versionType = VersionType.fromString(parser.text()); } else if ("_source".equals(currentFieldName)) { if (parser.isBooleanValue()) { - fetchSourceContext = new FetchSourceContext(parser.booleanValue()); + fetchSourceContext = new FetchSourceContext(parser.booleanValue(), fetchSourceContext.includes(), + fetchSourceContext.excludes()); } else if (token == XContentParser.Token.VALUE_STRING) { - fetchSourceContext = new FetchSourceContext(new String[]{parser.text()}); + fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), + new String[]{parser.text()}, fetchSourceContext.excludes()); } else { throw new ElasticsearchParseException("illegal type for _source: [{}]", token); } @@ -422,7 +424,8 @@ public class MultiGetRequest extends ActionRequest implements I while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { includes.add(parser.text()); } - fetchSourceContext = new FetchSourceContext(includes.toArray(Strings.EMPTY_ARRAY)); + fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), includes.toArray(Strings.EMPTY_ARRAY) + , fetchSourceContext.excludes()); } } else if (token == XContentParser.Token.START_OBJECT) { @@ -450,7 +453,7 @@ public class MultiGetRequest extends ActionRequest implements I } } - fetchSourceContext = new FetchSourceContext( + fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[includes.size()]), excludes == null ? Strings.EMPTY_ARRAY : excludes.toArray(new String[excludes.size()])); } @@ -463,7 +466,7 @@ public class MultiGetRequest extends ActionRequest implements I aFields = defaultFields; } items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType) - .fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext)); + .fetchSourceContext(fetchSourceContext == FetchSourceContext.FETCH_SOURCE ? 
defaultFetchSource : fetchSourceContext)); } } diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index 4e62030d329..10133d66152 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -47,8 +47,8 @@ public class TransportMultiGetAction extends HandledTransportAction listener) { ClusterState clusterState = clusterService.state(); - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); final AtomicArray responses = new AtomicArray<>(request.items.size()); + final Map shardRequests = new HashMap<>(); - Map shardRequests = new HashMap<>(); for (int i = 0; i < request.items.size(); i++) { MultiGetRequest.Item item = request.items.get(i); - if (!clusterState.metaData().hasConcreteIndex(item.index())) { - responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(item.index(), item.type(), item.id(), new IndexNotFoundException(item.index())))); - continue; - } - item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), item.index())); - String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName(); - if (item.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type())) { - responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(concreteSingleIndex, item.type(), item.id(), - new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]")))); + + String concreteSingleIndex; + try { + concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName(); + + item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), concreteSingleIndex)); + if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type()))) { + String message = "routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]"; + responses.set(i, newItemFailure(concreteSingleIndex, item.type(), item.id(), new IllegalArgumentException(message))); + continue; + } + } catch (Exception e) { + responses.set(i, newItemFailure(item.index(), item.type(), item.id(), e)); continue; } + ShardId shardId = clusterService.operationRouting() - .getShards(clusterState, concreteSingleIndex, item.id(), item.routing(), null).shardId(); + .getShards(clusterState, concreteSingleIndex, item.id(), item.routing(), null) + .shardId(); + MultiGetShardRequest shardRequest = shardRequests.get(shardId); if (shardRequest == null) { - shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.id()); + shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.getId()); shardRequests.put(shardId, shardRequest); } shardRequest.add(i, item); } - if (shardRequests.size() == 0) { + if (shardRequests.isEmpty()) { // only failures.. 
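// Illustrative note, not part of this patch (index/type/id values invented): items that
// resolve to the same ShardId are grouped into a single MultiGetShardRequest above, e.g.
//
//   MultiGetRequest request = new MultiGetRequest()
//       .add("index", "type", "1")
//       .add("index", "type", "2"); // same shard -> appended to the same shard request
//
// Only when every item failed index or routing resolution does shardRequests stay empty,
// in which case the per-item failures collected above are returned directly: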
listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()]))); } @@ -97,7 +103,8 @@ public class TransportMultiGetAction extends HandledTransportAction { public static final IndexAction INSTANCE = new IndexAction(); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 1f6bc9108c2..fdc248c4b37 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; @@ -67,68 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @see org.elasticsearch.client.Requests#indexRequest(String) * @see org.elasticsearch.client.Client#index(IndexRequest) */ -public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { - - /** - * Operation type controls if the type of the index operation. - */ - public enum OpType { - /** - * Index the source. If there an existing document with the id, it will - * be replaced. - */ - INDEX((byte) 0), - /** - * Creates the resource. Simply adds it to the index, if there is an existing - * document with the id, then it won't be removed. - */ - CREATE((byte) 1); - - private final byte id; - private final String lowercase; - - OpType(byte id) { - this.id = id; - this.lowercase = this.toString().toLowerCase(Locale.ENGLISH); - } - - /** - * The internal representation of the operation type. - */ - public byte id() { - return id; - } - - public String lowercase() { - return this.lowercase; - } - - /** - * Constructs the operation type from its internal representation. - */ - public static OpType fromId(byte id) { - if (id == 0) { - return INDEX; - } else if (id == 1) { - return CREATE; - } else { - throw new IllegalArgumentException("No type match for [" + id + "]"); - } - } - - public static OpType fromString(String sOpType) { - String lowersOpType = sOpType.toLowerCase(Locale.ROOT); - switch (lowersOpType) { - case "create": - return OpType.CREATE; - case "index": - return OpType.INDEX; - default: - throw new IllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); - } - } - - } +public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest { private String type; private String id; @@ -221,6 +160,10 @@ public class IndexRequest extends ReplicatedWriteRequest implement validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } + if (versionType == VersionType.FORCE) { + validationException = addValidationError("version type [force] may no longer be used", validationException); + } + if (ttl != null) { if (ttl.millis() < 0) { validationException = addValidationError("ttl must not be negative", validationException); @@ -526,6 +469,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement * Sets the type of operation to perform. 
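 * Now that {@code OpType} is shared across the bulk API via {@code DocWriteRequest}, only
 * {@code CREATE} and {@code INDEX} are valid here; any other value is rejected with an
 * {@link IllegalArgumentException}, as the check below shows. Illustrative call:
 * <pre>{@code
 * new IndexRequest("index", "type", "1").opType(DocWriteRequest.OpType.CREATE);
 * }</pre>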
*/ public IndexRequest opType(OpType opType) { + if (opType != OpType.CREATE && opType != OpType.INDEX) { + throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]"); + } this.opType = opType; if (opType == OpType.CREATE) { version(Versions.MATCH_DELETED); @@ -535,11 +481,19 @@ public class IndexRequest extends ReplicatedWriteRequest implement } /** - * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can + * Sets a string representation of the {@link #opType(OpType)}. Can * be either "index" or "create". */ public IndexRequest opType(String opType) { - return opType(OpType.fromString(opType)); + String op = opType.toLowerCase(Locale.ROOT); + if (op.equals("create")) { + opType(OpType.CREATE); + } else if (op.equals("index")) { + opType(OpType.INDEX); + } else { + throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]"); + } + return this; } @@ -554,34 +508,29 @@ public class IndexRequest extends ReplicatedWriteRequest implement } } - /** - * The type of operation to perform. - */ + @Override public OpType opType() { return this.opType; } - /** - * Sets the version, which will cause the index operation to only be performed if a matching - * version exists and no changes happened on the doc since then. - */ + @Override public IndexRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } - /** - * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. - */ + @Override public IndexRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } @@ -673,7 +622,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeOptionalString(timestamp); out.writeOptionalWriteable(ttl); out.writeBytesReference(source); - out.writeByte(opType.id()); + out.writeByte(opType.getId()); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index c4609e03aa5..310ef3fb928 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -200,7 +201,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuilderallowIdGeneration: If the id is not set, should it be generated. Defaults to true.
* */ -public class TransportIndexAction extends TransportWriteAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -76,7 +76,7 @@ public class TransportIndexAction extends TransportWriteAction onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { - return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); + protected WritePrimaryResult shardOperationOnPrimary(IndexRequest request, IndexShard primary) throws Exception { + final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, mappingUpdatedAction); + final IndexResponse response; + if (indexResult.hasFailure() == false) { + // update the version on request so it will happen on the replicas + final long version = indexResult.getVersion(); + request.version(version); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + request.seqNo(indexResult.getSeqNo()); + assert request.versionType().validateVersionForWrites(request.version()); + response = new IndexResponse(primary.shardId(), request.type(), request.id(), indexResult.getSeqNo(), + indexResult.getVersion(), indexResult.isCreated()); + } else { + response = null; + } + return new WritePrimaryResult(request, response, indexResult.getTranslogLocation(), indexResult.getFailure(), primary); } @Override - protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) { - return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation(); + protected WriteReplicaResult shardOperationOnReplica(IndexRequest request, IndexShard replica) throws Exception { + final Engine.IndexResult indexResult = executeIndexRequestOnReplica(request, replica); + return new WriteReplicaResult(request, indexResult.getTranslogLocation(), indexResult.getFailure(), replica); } /** * Execute the given {@link IndexRequest} on a replica shard, throwing a * {@link RetryOnReplicaException} if the operation needs to be re-tried. 
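 * <p>Note that with this change a {@code MapperParsingException} raised while preparing the
 * replica operation is no longer thrown from this method: it is returned as a failed
 * {@code Engine.IndexResult}, so the caller (see the replica bulk path above) decides
 * whether the failure may be ignored or must fail the shard.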
*/ - public static Engine.Index executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) { - final ShardId shardId = indexShard.shardId(); + public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) { + final ShardId shardId = replica.shardId(); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.seqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + final Engine.Index operation; + try { + operation = replica.prepareIndexOnReplica(sourceToParse, request.seqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + } catch (MapperParsingException e) { + return new Engine.IndexResult(e, request.version(), request.seqNo()); + } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } - indexShard.index(operation); - return operation; + return replica.index(operation); } /** Utility method to prepare an index operation on primary shards */ - public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) { + static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); } - public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, MappingUpdatedAction mappingUpdatedAction) throws Exception { - Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); + Engine.Index operation; + try { + operation = prepareIndexOperationOnPrimary(request, primary); + } catch (MapperParsingException | IllegalArgumentException e) { + return new Engine.IndexResult(e, request.version(), request.seqNo()); + } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final ShardId shardId = indexShard.shardId(); + final ShardId shardId = primary.shardId(); if (update != null) { - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); - operation = prepareIndexOperationOnPrimary(request, indexShard); + // can throw timeout exception when updating mappings or ISE for attempting to update default mappings + // which are bubbled up + try { + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); + } catch (IllegalArgumentException e) { + // throws IAE on conflicts merging dynamic mappings + return new Engine.IndexResult(e, request.version(), request.seqNo()); + } + 
try { + operation = prepareIndexOperationOnPrimary(request, primary); + } catch (MapperParsingException | IllegalArgumentException e) { + return new Engine.IndexResult(e, request.version(), request.seqNo()); + } update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { throw new ReplicationOperation.RetryOnPrimaryException(shardId, "Dynamic mappings are not available on the node that holds the primary yet"); } } - indexShard.index(operation); - // update the version on request so it will happen on the replicas - final long version = operation.version(); - request.version(version); - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - request.seqNo(operation.seqNo()); - - assert request.versionType().validateVersionForWrites(request.version()); - - IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.seqNo(), request.version(), operation.isCreated()); - return new WriteResult<>(response, operation.getTranslogLocation()); + return primary.index(operation); } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java index fc14e0de2df..90cbce135af 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java @@ -32,4 +32,12 @@ public class DeletePipelineRequestBuilder extends ActionRequestBuilder> { + static final class BulkRequestModifier implements Iterator { final BulkRequest bulkRequest; final Set failedSlots; @@ -150,7 +151,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public ActionRequest next() { + public DocWriteRequest next() { return bulkRequest.requests().get(++currentSlot); } @@ -171,7 +172,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio int slot = 0; originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()]; for (int i = 0; i < bulkRequest.requests().size(); i++) { - ActionRequest request = bulkRequest.requests().get(i); + DocWriteRequest request = bulkRequest.requests().get(i); if (failedSlots.contains(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; @@ -207,7 +208,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio // 3) Continue with the next request in the bulk. 
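// Illustrative walk-through, not part of this patch (slot numbers invented): if a bulk
// holds four requests and slot 1 fails pipeline preprocessing, the reduced bulk assembled
// in the loop above holds three requests and originalSlots == {0, 2, 3}; responses from
// the reduced bulk are later mapped back through originalSlots so the caller sees every
// item response, including this synthesized failure, in its original position.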
failedSlots.add(currentSlot); BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e); - itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType().lowercase(), failure)); + itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure)); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java index 3ce14d8dacd..96db19d5472 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractAsyncAction.java @@ -26,8 +26,10 @@ abstract class AbstractAsyncAction { private final long startTime; - protected AbstractAsyncAction() { - this.startTime = System.currentTimeMillis(); + protected AbstractAsyncAction() { this(System.currentTimeMillis());} + + protected AbstractAsyncAction(long startTime) { + this.startTime = startTime; } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 6cb68b8e9be..c973804a39d 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -27,104 +27,73 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.threadpool.ThreadPool; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; -import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest; abstract class AbstractSearchAsyncAction extends AbstractAsyncAction { protected final Logger logger; protected final SearchTransportService searchTransportService; - private final IndexNameExpressionResolver indexNameExpressionResolver; - protected final SearchPhaseController searchPhaseController; - protected final ThreadPool threadPool; + private final Executor executor; protected final ActionListener listener; - protected final 
GroupShardsIterator shardsIts; + private final GroupShardsIterator shardsIts; protected final SearchRequest request; - protected final ClusterState clusterState; - protected final DiscoveryNodes nodes; + /** Used by subclasses to resolve node ids to DiscoveryNodes. **/ + protected final Function nodeIdToDiscoveryNode; + protected final SearchTask task; protected final int expectedSuccessfulOps; private final int expectedTotalOps; protected final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger totalOps = new AtomicInteger(); protected final AtomicArray firstResults; + private final Map aliasFilter; + private final long clusterStateVersion; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); protected volatile ScoreDoc[] sortedShardDocs; - protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, ClusterService clusterService, - IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request, - ActionListener listener) { + protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, + Function nodeIdToDiscoveryNode, + Map aliasFilter, Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, long startTime, + long clusterStateVersion, SearchTask task) { + super(startTime); this.logger = logger; this.searchTransportService = searchTransportService; - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.searchPhaseController = searchPhaseController; - this.threadPool = threadPool; + this.executor = executor; this.request = request; + this.task = task; this.listener = listener; - - this.clusterState = clusterService.state(); - nodes = clusterState.nodes(); - - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name - // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead - // of just for the _search api - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(), - startTime(), request.indices()); - - for (String index : concreteIndices) { - clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); - } - - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), - request.indices()); - - shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); - final int shardCount = shardsIts.size(); - failIfOverShardCountLimit(clusterService, shardCount); - expectedSuccessfulOps = shardCount; + this.nodeIdToDiscoveryNode = nodeIdToDiscoveryNode; + this.clusterStateVersion = clusterStateVersion; + this.shardsIts = shardsIts; + expectedSuccessfulOps = shardsIts.size(); // we need to add 1 for non active partition, since we count it in the total! 
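// Illustrative accounting, not part of this patch (shard counts invented): with three
// shard groups of which one has no active copies, expectedSuccessfulOps == 3, and
// totalSizeWith1ForEmpty() below still counts the empty group once, so totalOps can
// reach expectedTotalOps and the phase completes even though that group can only fail.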
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); - firstResults = new AtomicArray<>(shardsIts.size()); + this.aliasFilter = aliasFilter; } - private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) { - final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING); - if (shardCount > shardCountLimit) { - throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " - + shardCountLimit + ". This limit exists because querying many shards at the same time can make the " - + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " - + "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey() - + "] to a greater value if you really want to query that many shards at the same time."); - } - } + public void start() { if (expectedSuccessfulOps == 0) { @@ -152,14 +121,14 @@ abstract class AbstractSearchAsyncAction // no more active shards... (we should not really get here, but just for safety) onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - final DiscoveryNode node = nodes.get(shard.currentNodeId()); + final DiscoveryNode node = nodeIdToDiscoveryNode.apply(shard.currentNodeId()); if (node == null) { onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { - String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, - shard.index().getName(), request.indices()); - sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, - startTime()), new ActionListener() { + AliasFilter filter = this.aliasFilter.get(shard.index().getName()); + ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shard, shardsIts.size(), + filter, startTime()); + sendExecuteFirstPhase(node, transportRequest , new ActionListener() { @Override public void onResponse(FirstResult result) { onFirstPhaseResult(shardIndex, shard, result, shardIt); @@ -319,7 +288,7 @@ abstract class AbstractSearchAsyncAction private void raiseEarlyFailure(Exception e) { for (AtomicArray.Entry entry : firstResults.asList()) { try { - DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.shardTarget().nodeId()); sendReleaseSearchContext(entry.value.id(), node); } catch (Exception inner) { inner.addSuppressed(e); @@ -344,7 +313,7 @@ abstract class AbstractSearchAsyncAction if (queryResult.hasHits() && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs try { - DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(entry.value.queryResult().shardTarget().nodeId()); sendReleaseSearchContext(entry.value.queryResult().id(), node); } catch (Exception e) { logger.trace("failed to release context", e); @@ -402,7 +371,7 @@ abstract class AbstractSearchAsyncAction sb.append(result.shardTarget()); } - logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version()); + logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterStateVersion); } moveToSecondPhase(); } @@ -410,4 +379,9 @@ abstract class 
AbstractSearchAsyncAction protected abstract void moveToSecondPhase() throws Exception; protected abstract String firstPhaseName(); + + protected Executor getExecutor() { + return executor; + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java index 6523378df4d..151635ac93f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class ClearScrollAction extends Action { public static final ClearScrollAction INSTANCE = new ClearScrollAction(); diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java index 17343e86912..f2f26d655d3 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java @@ -31,8 +31,6 @@ import java.util.List; import static org.elasticsearch.action.ValidateActions.addValidationError; -/** - */ public class ClearScrollRequest extends ActionRequest { private List scrollIds; diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java index 6dac0c4b3b3..b1ca4a60a85 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java @@ -24,8 +24,6 @@ import org.elasticsearch.client.ElasticsearchClient; import java.util.List; -/** - */ public class ClearScrollRequestBuilder extends ActionRequestBuilder { public ClearScrollRequestBuilder(ElasticsearchClient client, ClearScrollAction action) { diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java index 0887d268199..ff8314acce5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java @@ -31,8 +31,6 @@ import java.io.IOException; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; -/** - */ public class ClearScrollResponse extends ActionResponse implements StatusToXContent { private boolean succeeded; diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java index ffa0a4b63f0..04651c00c02 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class MultiSearchAction extends Action { public static final MultiSearchAction INSTANCE = new MultiSearchAction(); diff --git a/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java b/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java index 2ddb35e1357..f2ea5356106 
100644 --- a/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java +++ b/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java @@ -19,9 +19,6 @@ package org.elasticsearch.action.search; -/** - * - */ class ParsedScrollId { public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch"; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchAction.java index 501fe1afb76..38a77fa772a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class SearchAction extends Action { public static final SearchAction INSTANCE = new SearchAction(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index ba73b0f4bea..54117495cba 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -24,31 +24,35 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { private final AtomicArray queryFetchResults; - + private final SearchPhaseController searchPhaseController; SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map aliasFilter, SearchPhaseController searchPhaseController, + Executor executor, SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + request, listener, shardsIts, 
startTime, clusterStateVersion, task); + this.searchPhaseController = searchPhaseController; queryFetchResults = new AtomicArray<>(firstResults.length()); } @@ -60,7 +64,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction listener) { - searchTransportService.sendExecuteDfs(node, request, listener); + searchTransportService.sendExecuteDfs(node, request, task, listener); } @Override @@ -70,7 +74,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest); } @@ -78,7 +82,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction() { + searchTransportService.sendExecuteFetch(node, querySearchRequest, task, new ActionListener() { @Override public void onResponse(QueryFetchSearchResult result) { result.shardTarget(dfsResult.shardTarget()); @@ -115,7 +119,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index ccd646ae129..3f8b20bc1fa 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -26,36 +26,42 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { final AtomicArray queryResults; final AtomicArray fetchResults; final AtomicArray docIdsToLoad; + private final SearchPhaseController searchPhaseController; SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService 
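These constructors now accept a plain java.util.concurrent.Executor rather than a whole ThreadPool, and the second phase is dispatched through getExecutor(). A minimal sketch (hypothetical names) of why that narrowing helps: any Executor can be substituted, including a same-thread one in tests.

import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class ExecutorInjectionSketch {
    private final Executor executor;

    ExecutorInjectionSketch(Executor executor) { this.executor = executor; }

    void moveToSecondPhase() {
        executor.execute(() ->
            System.out.println("reducing shard results on " + Thread.currentThread().getName()));
    }

    public static void main(String[] args) {
        ExecutorService searchPool = Executors.newSingleThreadExecutor();
        try {
            new ExecutorInjectionSketch(searchPool).moveToSecondPhase();    // pooled, as in production
            new ExecutorInjectionSketch(Runnable::run).moveToSecondPhase(); // same-thread, handy in tests
        } finally {
            searchPool.shutdown();
        }
    }
}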
searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map aliasFilter, SearchPhaseController searchPhaseController, + Executor executor, SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, + SearchTask task) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + request, listener, shardsIts, startTime, clusterStateVersion, task); + this.searchPhaseController = searchPhaseController; queryResults = new AtomicArray<>(firstResults.length()); fetchResults = new AtomicArray<>(firstResults.length()); docIdsToLoad = new AtomicArray<>(firstResults.length()); @@ -69,7 +75,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction listener) { - searchTransportService.sendExecuteDfs(node, request, listener); + searchTransportService.sendExecuteDfs(node, request, task, listener); } @Override @@ -78,7 +84,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; - DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(dfsResult.shardTarget().nodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } @@ -86,7 +92,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction() { + searchTransportService.sendExecuteQuery(node, querySearchRequest, task, new ActionListener() { @Override public void onResponse(QuerySearchResult result) { result.shardTarget(dfsResult.shardTarget()); @@ -149,7 +155,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); - DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } @@ -157,7 +163,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction() { + searchTransportService.sendExecuteFetch(node, fetchSearchRequest, task, new ActionListener() { @Override public void onResponse(FetchSearchResult result) { result.shardTarget(shardTarget); @@ -192,7 +198,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 7306e645e0d..6312d051656 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java 
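A detail worth noting in these async actions is the AtomicInteger counter handed through executeSecondPhase and executeQuery: each shard response decrements it, and the last response triggers the next phase. A self-contained sketch of that countdown idiom (hypothetical names, pure JDK):

import java.util.concurrent.atomic.AtomicInteger;

class PhaseCountdownSketch {
    private final AtomicInteger counter;
    private final Runnable nextPhase;

    PhaseCountdownSketch(int expectedShardResponses, Runnable nextPhase) {
        this.counter = new AtomicInteger(expectedShardResponses);
        this.nextPhase = nextPhase;
    }

    // Called once per shard response, success or failure; whichever thread
    // delivers the last outstanding response starts the next phase exactly once.
    void onShardResponse() {
        if (counter.decrementAndGet() == 0) {
            nextPhase.run();
        }
    }

    public static void main(String[] args) {
        PhaseCountdownSketch dfsPhase = new PhaseCountdownSketch(3, () -> System.out.println("start query phase"));
        dfsPhase.onShardResponse();
        dfsPhase.onShardResponse();
        dfsPhase.onShardResponse(); // prints: start query phase
    }
}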
@@ -30,10 +30,8 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -70,12 +68,9 @@ import java.util.Map; import java.util.stream.Collectors; import java.util.stream.StreamSupport; -/** - * - */ public class SearchPhaseController extends AbstractComponent { - public static final Comparator> QUERY_RESULT_ORDERING = (o1, o2) -> { + private static final Comparator> QUERY_RESULT_ORDERING = (o1, o2) -> { int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); if (i == 0) { i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); @@ -83,17 +78,15 @@ public class SearchPhaseController extends AbstractComponent { return i; }; - public static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; + private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; private final BigArrays bigArrays; private final ScriptService scriptService; - private final ClusterService clusterService; - SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) { + SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) { super(settings); this.bigArrays = bigArrays; this.scriptService = scriptService; - this.clusterService = clusterService; } public AggregatedDfs aggregateDfs(AtomicArray results) { @@ -489,7 +482,7 @@ public class SearchPhaseController extends AbstractComponent { for (AtomicArray.Entry entry : queryResults) { aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations()); } - ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state()); + ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService); aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); List pipelineAggregators = firstResult.pipelineAggregators(); if (pipelineAggregators != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index d799bc26764..25e7e14bb87 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -22,24 +22,33 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; -import org.elasticsearch.threadpool.ThreadPool; 
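QUERY_RESULT_ORDERING above sorts shard results by index name and breaks ties on shard id. The same ordering can be expressed declaratively with Comparator.comparing(...).thenComparingInt(...); a small stand-alone sketch with a hypothetical ShardTarget:

import java.util.Arrays;
import java.util.Comparator;

class ShardOrderingSketch {
    static final class ShardTarget {
        final String index; final int shardId;
        ShardTarget(String index, int shardId) { this.index = index; this.shardId = shardId; }
        @Override public String toString() { return index + "[" + shardId + "]"; }
    }

    // Primary key: index name; tie-break: shard id.
    static final Comparator<ShardTarget> ORDERING =
        Comparator.comparing((ShardTarget t) -> t.index).thenComparingInt(t -> t.shardId);

    public static void main(String[] args) {
        ShardTarget[] targets = {
            new ShardTarget("twitter", 2), new ShardTarget("logs", 1), new ShardTarget("twitter", 0)
        };
        Arrays.sort(targets, ORDERING);
        System.out.println(Arrays.toString(targets)); // [logs[1], twitter[0], twitter[2]]
    }
}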
import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.function.Function; class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction { + private final SearchPhaseController searchPhaseController; + SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchTransportService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, - request, listener); + Function nodeIdToDiscoveryNode, + Map aliasFilter, + SearchPhaseController searchPhaseController, Executor executor, + SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, + SearchTask task) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + request, listener, shardsIts, startTime, clusterStateVersion, task); + this.searchPhaseController = searchPhaseController; + } @Override @@ -50,12 +59,12 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction listener) { - searchTransportService.sendExecuteFetch(node, request, listener); + searchTransportService.sendExecuteFetch(node, request, task, listener); } @Override protected void moveToSecondPhase() throws Exception { - threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 6df2bb3f87e..23b744e5de1 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -26,31 +26,39 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { final AtomicArray fetchResults; final AtomicArray docIdsToLoad; + private final SearchPhaseController searchPhaseController; - 
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchService, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SearchPhaseController searchPhaseController, ThreadPool threadPool, - SearchRequest request, ActionListener listener) { - super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener); + SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, + Function nodeIdToDiscoveryNode, Map aliasFilter, + SearchPhaseController searchPhaseController, Executor executor, + SearchRequest request, ActionListener listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, + SearchTask task) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, + shardsIts, startTime, clusterStateVersion, task); + this.searchPhaseController = searchPhaseController; fetchResults = new AtomicArray<>(firstResults.length()); docIdsToLoad = new AtomicArray<>(firstResults.length()); } @@ -63,7 +71,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction listener) { - searchTransportService.sendExecuteQuery(node, request, listener); + searchTransportService.sendExecuteQuery(node, request, task, listener); } @Override @@ -82,7 +90,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction entry : docIdsToLoad.asList()) { QuerySearchResultProvider queryResult = firstResults.get(entry.index); - DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); + DiscoveryNode node = nodeIdToDiscoveryNode.apply(queryResult.shardTarget().nodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } @@ -90,7 +98,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction() { + searchTransportService.sendExecuteFetch(node, fetchSearchRequest, task, new ActionListener() { @Override public void onResponse(FetchSearchResult result) { result.shardTarget(shardTarget); @@ -125,7 +133,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction(listener) { + getExecutor().execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { final boolean isScrollRequest = request.scroll() != null; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index a1b1a02a97e..de27805b139 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -31,6 +31,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.Arrays; @@ -275,6 +277,11 @@ public final class SearchRequest extends ActionRequest implements return source != null && source.isSuggestOnly(); } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new SearchTask(id, type, action, getDescription(), parentTaskId); + } + @Override public void readFrom(StreamInput in) 
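The createTask override added to SearchRequest is a factory-method hook: the task framework asks the request itself which Task subclass should represent it. A toy version of the hook (hypothetical names, not the Elasticsearch task API):

import java.util.concurrent.atomic.AtomicLong;

class CreateTaskSketch {
    static class Task {
        final long id; final String action; final String description;
        Task(long id, String action, String description) {
            this.id = id; this.action = action; this.description = description;
        }
    }

    abstract static class Request {
        // Default hook; subclasses override to return a richer task type.
        Task createTask(long id, String action) {
            return new Task(id, action, describe());
        }
        abstract String describe();
    }

    static class SearchLikeRequest extends Request {
        @Override Task createTask(long id, String action) {
            // A real SearchRequest would return a cancellable SearchTask here.
            return new Task(id, action, describe());
        }
        @Override String describe() { return "indices[twitter], source[...]"; }
    }

    private static final AtomicLong IDS = new AtomicLong();

    public static void main(String[] args) {
        Task task = new SearchLikeRequest().createTask(IDS.incrementAndGet(), "indices:data/read/search");
        System.out.println("task " + task.id + ": " + task.description);
    }
}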
throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java index eccfa0526a1..6bd23f7741a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class SearchScrollAction extends Action { public static final SearchScrollAction INSTANCE = new SearchScrollAction(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index 2bdf7dc30f9..bf53fc719c6 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -44,6 +44,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { private final SearchPhaseController searchPhaseController; private final SearchTransportService searchTransportService; private final SearchScrollRequest request; + private final SearchTask task; private final ActionListener listener; private final ParsedScrollId scrollId; private final DiscoveryNodes nodes; @@ -52,13 +53,14 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { private final AtomicInteger successfulOps; private final AtomicInteger counter; - SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService, - SearchTransportService searchTransportService, SearchPhaseController searchPhaseController, - SearchScrollRequest request, ParsedScrollId scrollId, ActionListener listener) { + SearchScrollQueryAndFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService, + SearchPhaseController searchPhaseController, SearchScrollRequest request, SearchTask task, + ParsedScrollId scrollId, ActionListener listener) { this.logger = logger; this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; this.request = request; + this.task = task; this.listener = listener; this.scrollId = scrollId; this.nodes = clusterService.state().nodes(); @@ -128,7 +130,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) { InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request); - searchTransportService.sendExecuteFetch(node, internalRequest, new ActionListener() { + searchTransportService.sendExecuteFetch(node, internalRequest, task, new ActionListener() { @Override public void onResponse(ScrollQueryFetchSearchResult result) { queryFetchResults.set(shardIndex, result.result()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 4024d3b5f39..851e3343bc2 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -44,6 +44,7 @@ 
import static org.elasticsearch.action.search.TransportSearchHelper.internalScro class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private final Logger logger; + private final SearchTask task; private final SearchTransportService searchTransportService; private final SearchPhaseController searchPhaseController; private final SearchScrollRequest request; @@ -56,13 +57,14 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private volatile ScoreDoc[] sortedShardDocs; private final AtomicInteger successfulOps; - SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService, - SearchTransportService searchTransportService, SearchPhaseController searchPhaseController, - SearchScrollRequest request, ParsedScrollId scrollId, ActionListener listener) { + SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService, + SearchPhaseController searchPhaseController, SearchScrollRequest request, SearchTask task, + ParsedScrollId scrollId, ActionListener listener) { this.logger = logger; this.searchTransportService = searchTransportService; this.searchPhaseController = searchPhaseController; this.request = request; + this.task = task; this.listener = listener; this.scrollId = scrollId; this.nodes = clusterService.state().nodes(); @@ -124,7 +126,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) { InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request); - searchTransportService.sendExecuteQuery(node, internalRequest, new ActionListener() { + searchTransportService.sendExecuteQuery(node, internalRequest, task, new ActionListener() { @Override public void onResponse(ScrollQuerySearchResult result) { queryResults.set(shardIndex, result.queryResult()); @@ -184,7 +186,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index]; ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc); DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId()); - searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener() { + searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, new ActionListener() { @Override public void onResponse(FetchSearchResult result) { result.shardTarget(querySearchResult.shardTarget()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index 9ab2a4cf560..8a171e24a1e 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -25,15 +25,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.Scroll; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -/** - * - */ public class SearchScrollRequest extends ActionRequest { private String scrollId; @@ -110,6 +109,11 @@ 
public class SearchScrollRequest extends ActionRequest { out.writeOptionalWriteable(scroll); } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new SearchTask(id, type, action, getDescription(), parentTaskId); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/core/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java b/core/src/main/java/org/elasticsearch/action/search/SearchTask.java similarity index 65% rename from core/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java rename to core/src/main/java/org/elasticsearch/action/search/SearchTask.java index 1ec55c874b4..24f94a43319 100644 --- a/core/src/main/java/org/elasticsearch/discovery/InitialStateDiscoveryListener.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTask.java @@ -17,17 +17,18 @@ * under the License. */ -package org.elasticsearch.discovery; +package org.elasticsearch.action.search; + +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.TaskId; /** - * A listener that should be called by the {@link org.elasticsearch.discovery.Discovery} component - * when the first valid initial cluster state has been submitted and processed by the cluster service. - *
<p>
- * Note, this listener should be registered with the discovery service before it has started. - * - * + * Task storing information about a currently running search request. */ -public interface InitialStateDiscoveryListener { +public class SearchTask extends CancellableTask { + + public SearchTask(long id, String type, String action, String description, TaskId parentTaskId) { + super(id, type, action, description, parentTaskId); + } - void initialStateProcessed(); } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 0451a8920eb..9b5d180ce10 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; @@ -42,7 +44,10 @@ import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.query.ScrollQuerySearchResult; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TaskAwareTransportRequestHandler; +import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; @@ -77,22 +82,22 @@ public class SearchTransportService extends AbstractComponent { public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) { transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId), - new ActionListenerResponseHandler<>(new ActionListener() { - @Override - public void onResponse(SearchFreeContextResponse response) { - // no need to respond if it was freed or not - } + new ActionListenerResponseHandler<>(new ActionListener() { + @Override + public void onResponse(SearchFreeContextResponse response) { + // no need to respond if it was freed or not + } - @Override - public void onFailure(Exception e) { + @Override + public void onFailure(Exception e) { - } - }, SearchFreeContextResponse::new)); + } + }, SearchFreeContextResponse::new)); } public void sendFreeContext(DiscoveryNode node, long contextId, final ActionListener listener) { transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId), - new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new)); + new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new)); } public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener listener) { @@ -100,59 +105,62 @@ public class SearchTransportService extends AbstractComponent { new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE)); } - public void sendExecuteDfs(DiscoveryNode node, final 
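SearchTask gains its cancellability from CancellableTask. The underlying idea is just a thread-safe flag that long-running phases poll between units of work; a minimal stand-alone sketch:

import java.util.concurrent.atomic.AtomicBoolean;

class CancellableWorkSketch {
    private final AtomicBoolean cancelled = new AtomicBoolean();

    void cancel() { cancelled.set(true); }
    boolean isCancelled() { return cancelled.get(); }

    // Long-running work checks the flag between units and bails out early.
    void run() {
        for (int shard = 0; shard < 100; shard++) {
            if (isCancelled()) {
                System.out.println("aborting before shard " + shard);
                return;
            }
            // ... query one shard ...
        }
    }

    public static void main(String[] args) {
        CancellableWorkSketch work = new CancellableWorkSketch();
        work.cancel();
        work.run(); // prints: aborting before shard 0
    }
}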
ShardSearchTransportRequest request, + public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, DFS_ACTION_NAME, request, new ActionListenerResponseHandler<>(listener, DfsSearchResult::new)); + transportService.sendChildRequest(node, DFS_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, DfsSearchResult::new)); } - public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request, + public void sendExecuteQuery(DiscoveryNode node, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, QUERY_ACTION_NAME, request, - new ActionListenerResponseHandler<>(listener, QuerySearchResult::new)); - } - - public void sendExecuteQuery(DiscoveryNode node, final QuerySearchRequest request, final ActionListener listener) { - transportService.sendRequest(node, QUERY_ID_ACTION_NAME, request, + transportService.sendChildRequest(node, QUERY_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, QuerySearchResult::new)); } - public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, + public void sendExecuteQuery(DiscoveryNode node, final QuerySearchRequest request, SearchTask task, + final ActionListener listener) { + transportService.sendChildRequest(node, QUERY_ID_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, QuerySearchResult::new)); + } + + public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, QUERY_SCROLL_ACTION_NAME, request, - new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new)); + transportService.sendChildRequest(node, QUERY_SCROLL_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request, + public void sendExecuteFetch(DiscoveryNode node, final ShardSearchTransportRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, QUERY_FETCH_ACTION_NAME, request, - new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new)); + transportService.sendChildRequest(node, QUERY_FETCH_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request, + public void sendExecuteFetch(DiscoveryNode node, final QuerySearchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request, - new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new)); + transportService.sendChildRequest(node, QUERY_QUERY_FETCH_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, + public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request, - new ActionListenerResponseHandler<>(listener, ScrollQueryFetchSearchResult::new)); + 
transportService.sendChildRequest(node, QUERY_FETCH_SCROLL_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, ScrollQueryFetchSearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request, + public void sendExecuteFetch(DiscoveryNode node, final ShardFetchSearchRequest request, SearchTask task, final ActionListener listener) { - sendExecuteFetch(node, FETCH_ID_ACTION_NAME, request, listener); + sendExecuteFetch(node, FETCH_ID_ACTION_NAME, request, task, listener); } - public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, + public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, SearchTask task, final ActionListener listener) { - sendExecuteFetch(node, FETCH_ID_SCROLL_ACTION_NAME, request, listener); + sendExecuteFetch(node, FETCH_ID_SCROLL_ACTION_NAME, request, task, listener); } - private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request, + private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request, SearchTask task, final ActionListener listener) { - transportService.sendRequest(node, action, request, new ActionListenerResponseHandler<>(listener, FetchSearchResult::new)); + transportService.sendChildRequest(node, action, request, task, + new ActionListenerResponseHandler<>(listener, FetchSearchResult::new)); } static class ScrollFreeContextRequest extends TransportRequest { @@ -252,64 +260,103 @@ public class SearchTransportService extends AbstractComponent { public static void registerRequestHandler(TransportService transportService, SearchService searchService) { transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME, - ((request, channel) -> { - boolean freed = searchService.freeContext(request.id()); - channel.sendResponse(new SearchFreeContextResponse(freed)); - })); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(ScrollFreeContextRequest request, TransportChannel channel, Task task) throws Exception { + boolean freed = searchService.freeContext(request.id()); + channel.sendResponse(new SearchFreeContextResponse(freed)); + } + }); transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { - boolean freed = searchService.freeContext(request.id()); - channel.sendResponse(new SearchFreeContextResponse(freed)); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(SearchFreeContextRequest request, TransportChannel channel, Task task) throws Exception { + boolean freed = searchService.freeContext(request.id()); + channel.sendResponse(new SearchFreeContextResponse(freed)); + } }); transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE, - ThreadPool.Names.SAME, (request, channel) -> { - searchService.freeAllScrollContexts(); - channel.sendResponse(TransportResponse.Empty.INSTANCE); + ThreadPool.Names.SAME, + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(TransportRequest.Empty request, TransportChannel channel, Task task) throws Exception { + searchService.freeAllScrollContexts(); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } }); transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, - 
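Switching sendRequest to sendChildRequest registers each outbound shard request under the coordinating SearchTask, so cancelling the parent can reach every in-flight child. A rough sketch of such a parent/child registry (hypothetical, not the TransportService API):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;

class ChildRequestSketch {
    private final Map<Long, List<Runnable>> childCancellers = new ConcurrentHashMap<>();

    void sendChildRequest(long parentTaskId, String action, Runnable cancelChild) {
        childCancellers.computeIfAbsent(parentTaskId, k -> new CopyOnWriteArrayList<>()).add(cancelChild);
        System.out.println("sent " + action + " as child of task " + parentTaskId);
    }

    // Cancelling the parent task fans out to every registered child request.
    void cancelParent(long parentTaskId) {
        for (Runnable canceller : childCancellers.getOrDefault(parentTaskId, Collections.emptyList())) {
            canceller.run();
        }
    }

    public static void main(String[] args) {
        ChildRequestSketch registry = new ChildRequestSketch();
        registry.sendChildRequest(7L, "search[phase/query]", () -> System.out.println("query child cancelled"));
        registry.sendChildRequest(7L, "search[phase/fetch]", () -> System.out.println("fetch child cancelled"));
        registry.cancelParent(7L);
    }
}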
(request, channel) -> { - DfsSearchResult result = searchService.executeDfsPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { + DfsSearchResult result = searchService.executeDfsPhase(request, (SearchTask)task); + channel.sendResponse(result); + + } }); transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - QuerySearchResultProvider result = searchService.executeQueryPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { + QuerySearchResultProvider result = searchService.executeQueryPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - QuerySearchResult result = searchService.executeQueryPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(QuerySearchRequest request, TransportChannel channel, Task task) throws Exception { + QuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - ScrollQuerySearchResult result = searchService.executeQueryPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel, Task task) throws Exception { + ScrollQuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - QueryFetchSearchResult result = searchService.executeFetchPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { + QueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - QueryFetchSearchResult result = searchService.executeFetchPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(QuerySearchRequest request, TransportChannel channel, Task task) throws Exception { + QueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + 
@Override + public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel, Task task) throws Exception { + ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - FetchSearchResult result = searchService.executeFetchPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(ShardFetchRequest request, TransportChannel channel, Task task) throws Exception { + FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH, - (request, channel) -> { - FetchSearchResult result = searchService.executeFetchPhase(request); - channel.sendResponse(result); + new TaskAwareTransportRequestHandler() { + @Override + public void messageReceived(ShardFetchSearchRequest request, TransportChannel channel, Task task) throws Exception { + FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task); + channel.sendResponse(result); + } }); + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index efd04035276..2bceccce385 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -51,7 +51,7 @@ public class TransportMultiSearchAction extends HandledTransportAction buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, String...concreteIndices) { + final Map aliasFilterMap = new HashMap<>(); + for (String index : concreteIndices) { + clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); + AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index, request.indices()); + if (aliasFilter != null) { + aliasFilterMap.put(index, aliasFilter); + } + } + return aliasFilterMap; } @Override - protected void doExecute(SearchRequest searchRequest, ActionListener listener) { + protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { + // pure paranoia if time goes backwards we are at least positive + final long startTimeInMillis = Math.max(0, System.currentTimeMillis()); + ClusterState clusterState = clusterService.state(); + clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + + // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name + // date math expressions and $now in scripts. 
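The lambda handlers are rewritten as TaskAwareTransportRequestHandler instances because messageReceived now takes the Task as a third argument, letting shard-level code observe cancellation mid-request. A compact sketch of widening a handler signature this way (hypothetical names):

class TaskAwareHandlerSketch {
    interface Channel { void sendResponse(Object response); }

    // Old shape: (request, channel). The new shape adds the task.
    interface TaskAwareHandler<R> {
        void messageReceived(R request, Channel channel, CancellableCtx task) throws Exception;
    }

    static class CancellableCtx {
        private volatile boolean cancelled;
        void cancel() { cancelled = true; }
        boolean isCancelled() { return cancelled; }
    }

    public static void main(String[] args) throws Exception {
        TaskAwareHandler<String> queryHandler = (request, channel, task) -> {
            if (task.isCancelled()) {
                throw new IllegalStateException("task was cancelled");
            }
            channel.sendResponse("result for " + request);
        };
        queryHandler.messageReceived("shard-query", System.out::println, new CancellableCtx());
    }
}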
This way all apis will deal with now in the same way instead + // of just for the _search api + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest.indicesOptions(), + startTimeInMillis, searchRequest.indices()); + Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, concreteIndices); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), + searchRequest.indices()); + GroupShardsIterator shardIterators = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, + searchRequest.preference()); + failIfOverShardCountLimit(clusterService, shardIterators.size()); + // optimize search type for cases where there is only one shard group to search on try { - ClusterState clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest); - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, - searchRequest.routing(), searchRequest.indices()); - int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap); - if (shardCount == 1) { + if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard searchRequest.searchType(QUERY_AND_FETCH); } @@ -95,27 +130,42 @@ public class TransportSearchAction extends HandledTransportAction listener) { + @Override + protected final void doExecute(SearchRequest searchRequest, ActionListener listener) { + throw new UnsupportedOperationException("the task parameter is required"); + } + + private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest, GroupShardsIterator shardIterators, + long startTime, ClusterState state, Map aliasFilter, + ActionListener listener) { + final Function nodesLookup = state.nodes()::get; + final long clusterStateVersion = state.version(); + Executor executor = threadPool.executor(ThreadPool.Names.SEARCH); AbstractSearchAsyncAction searchAsyncAction; switch(searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: - searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion, task); break; case QUERY_THEN_FETCH: - searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion, task); break; case DFS_QUERY_AND_FETCH: - searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, 
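doExecute now samples the clock once (clamped to be non-negative, as the comment notes) and hands startTimeInMillis to index-name resolution, so date-math expressions across one request all see the same instant. A small sketch of resolving against an explicit timestamp (the {now/d} handling here is a hypothetical simplification, not Elasticsearch's date-math parser):

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

class ConsistentNowSketch {
    // Resolve a date-math-like index name against an explicit timestamp.
    static String resolve(String pattern, long nowMillis) {
        String day = DateTimeFormatter.ofPattern("yyyy.MM.dd")
            .withZone(ZoneOffset.UTC).format(Instant.ofEpochMilli(nowMillis));
        return pattern.replace("{now/d}", day);
    }

    public static void main(String[] args) {
        long startTimeInMillis = Math.max(0, System.currentTimeMillis()); // clamp, pure paranoia
        // Every phase of one request resolves against the same instant:
        System.out.println(resolve("logs-{now/d}", startTimeInMillis));
        System.out.println(resolve("metrics-{now/d}", startTimeInMillis));
    }
}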
startTime, + clusterStateVersion, task); break; case QUERY_AND_FETCH: - searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, clusterService, - indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener); + searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup, + aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + clusterStateVersion, task); break; default: throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); @@ -123,4 +173,15 @@ public class TransportSearchAction extends HandledTransportAction shardCountLimit) { + throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " + + shardCountLimit + ". This limit exists because querying many shards at the same time can make the " + + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " + + "have a smaller number of larger shards. Update [" + SHARD_COUNT_LIMIT_SETTING.getKey() + + "] to a greater value if you really want to query that many shards at the same time."); + } + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index db42527b125..a09a651086b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -30,16 +30,8 @@ import org.elasticsearch.search.internal.ShardSearchTransportRequest; import java.io.IOException; import java.util.Base64; -/** - * - */ final class TransportSearchHelper { - static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request, - String[] filteringAliases, long nowInMillis) { - return new ShardSearchTransportRequest(request, shardRouting, numberOfShards, filteringAliases, nowInMillis); - } - static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) { return new InternalScrollSearchRequest(request, id); } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 9d2307f1b4f..9b70727485a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -35,9 +36,6 @@ import static org.elasticsearch.action.search.ParsedScrollId.QUERY_AND_FETCH_TYP import static org.elasticsearch.action.search.ParsedScrollId.QUERY_THEN_FETCH_TYPE; import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; -/** - * - */ public class TransportSearchScrollAction extends HandledTransportAction { private final ClusterService clusterService; @@ -52,22 +50,26 @@ public class TransportSearchScrollAction extends HandledTransportAction listener) { + protected final void 
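failIfOverShardCountLimit is now a static helper on TransportSearchAction and runs before any shard request is dispatched: validate the fan-out width, then fan out. Reduced to its skeleton (hypothetical, without the settings plumbing):

class ShardCountGuardSketch {
    static void failIfOverShardCountLimit(int shardCount, long shardCountLimit) {
        if (shardCount > shardCountLimit) {
            throw new IllegalArgumentException("Trying to query " + shardCount
                + " shards, which is over the limit of " + shardCountLimit);
        }
    }

    public static void main(String[] args) {
        failIfOverShardCountLimit(500, 1000); // within the limit, proceeds
        try {
            failIfOverShardCountLimit(2000, 1000);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}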
doExecute(SearchScrollRequest request, ActionListener listener) { + throw new UnsupportedOperationException("the task parameter is required"); + } + @Override + protected void doExecute(Task task, SearchScrollRequest request, ActionListener listener) { try { ParsedScrollId scrollId = parseScrollId(request.scrollId()); AbstractAsyncAction action; switch (scrollId.getType()) { case QUERY_THEN_FETCH_TYPE: action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchTransportService, - searchPhaseController, request, scrollId, listener); + searchPhaseController, request, (SearchTask)task, scrollId, listener); break; case QUERY_AND_FETCH_TYPE: action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchTransportService, - searchPhaseController, request, scrollId, listener); + searchPhaseController, request, (SearchTask)task, scrollId, listener); break; default: throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized"); diff --git a/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java b/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java index ee260ddd1e1..d6e06613d59 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/support/AbstractListenableActionFuture.java @@ -28,9 +28,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.List; -/** - * - */ public abstract class AbstractListenableActionFuture extends AdapterActionFuture implements ListenableActionFuture { private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class); diff --git a/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java b/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java index 7217961d899..98481eccfbb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -69,14 +70,14 @@ public class ActiveShardsObserver extends AbstractComponent { } final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext()); - if (activeShardCount.enoughShardsActive(observer.observedState(), indexName)) { + if (activeShardCount.enoughShardsActive(observer.observedState().getClusterState(), indexName)) { onResult.accept(true); } else { final ClusterStateObserver.ChangePredicate shardsAllocatedPredicate = new ClusterStateObserver.ValidationPredicate() { @Override - protected boolean validate(final ClusterState newState) { - return activeShardCount.enoughShardsActive(newState, indexName); + protected boolean validate(final ClusterServiceState newState) { + return activeShardCount.enoughShardsActive(newState.getClusterState(), indexName); } }; diff --git a/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java 
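Both transport actions keep the old task-less doExecute only to satisfy the class hierarchy and make it throw, which forces every caller through the task-aware overload. The shape of that trick in isolation (hypothetical names):

class TaskRequiredSketch {
    abstract static class Action<R> {
        // Retired entry point: kept for the hierarchy, refuses to run.
        protected void doExecute(R request) {
            throw new UnsupportedOperationException("the task parameter is required");
        }

        protected abstract void doExecute(Object task, R request);
    }

    static class ScrollLikeAction extends Action<String> {
        @Override protected void doExecute(Object task, String request) {
            System.out.println("executing " + request + " under " + task);
        }
    }

    public static void main(String[] args) {
        ScrollLikeAction action = new ScrollLikeAction();
        action.doExecute("SearchTask#42", "scroll-1");
        try {
            action.doExecute("scroll-1"); // the retired path fails loudly
        } catch (UnsupportedOperationException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}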
b/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java index eab486f4929..b2167c3051b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java @@ -31,9 +31,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -/** - * - */ public abstract class AdapterActionFuture extends BaseFuture implements ActionFuture, ActionListener { @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 0fe3be1ad63..3f7df803e24 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -31,9 +31,6 @@ import java.io.IOException; import static org.elasticsearch.ExceptionsHelper.detailedMessage; -/** - * - */ public class DefaultShardOperationFailedException implements ShardOperationFailedException { private String index; diff --git a/core/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/core/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index f520def6e6d..094f82ae31f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -19,9 +19,6 @@ package org.elasticsearch.action.support; -/** - * - */ public class PlainActionFuture extends AdapterActionFuture { public static PlainActionFuture newFuture() { diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java index 7d1a091d6b3..b348d2ec317 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -38,9 +38,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.action.support.PlainActionFuture.newFuture; -/** - * - */ public abstract class TransportAction, Response extends ActionResponse> extends AbstractComponent { protected final ThreadPool threadPool; @@ -141,17 +138,8 @@ public abstract class TransportAction, Re listener = new TaskResultStoringActionListener<>(taskManager, task, listener); } - if (filters.length == 0) { - try { - doExecute(task, request, listener); - } catch(Exception e) { - logger.trace("Error during transport action execution.", e); - listener.onFailure(e); - } - } else { - RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger); - requestFilterChain.proceed(task, actionName, request, listener); - } + RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger); + requestFilterChain.proceed(task, actionName, request, listener); } protected void doExecute(Task task, Request request, ActionListener listener) { diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java index 6170d967002..fb995513acb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java @@ -24,8 +24,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public abstract class BroadcastOperationRequestBuilder, Response extends BroadcastResponse, RequestBuilder extends BroadcastOperationRequestBuilder> extends ActionRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java index 508581050a6..b6ab85c0b18 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java @@ -28,9 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - * - */ public class BroadcastRequest> extends ActionRequest implements IndicesRequest.Replaceable { protected String[] indices; diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java index 921724e6572..1012e8930bb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java @@ -29,9 +29,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public abstract class BroadcastShardRequest extends TransportRequest implements IndicesRequest { private ShardId shardId; diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java index 398a8d6c905..6845e6ced6c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java @@ -26,9 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -/** - * - */ public abstract class BroadcastShardResponse extends TransportResponse { ShardId shardId; diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 87ef385a243..c48fa1e8122 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -44,13 +44,11 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.Supplier; -/** - * - */ public abstract class TransportBroadcastAction, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse> extends HandledTransportAction { @@ -86,9 +84,9 @@ public abstract class TransportBroadcastAction> extends MasterNodeReadRequest implements IndicesRequest.Replaceable { 
private String[] indices = Strings.EMPTY_ARRAY; diff --git a/core/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java index 1e871a41867..96e0ad8ee6b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java @@ -25,8 +25,6 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBui import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; -/** - */ public abstract class ClusterInfoRequestBuilder, Response extends ActionResponse, Builder extends ClusterInfoRequestBuilder> extends MasterNodeReadOperationRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java index 66b9fce5d71..59b1997b356 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java @@ -31,8 +31,6 @@ import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; -/** - */ public abstract class TransportClusterInfoAction, Response extends ActionResponse> extends TransportMasterNodeReadAction { diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java index f743ff02c74..1ce32247eab 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodeRequest.java @@ -25,9 +25,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public abstract class BaseNodeRequest extends TransportRequest { private String nodeId; diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java index 663537f25da..4523e8d339a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; -/** - * - */ public abstract class BaseNodesRequest> extends ActionRequest { /** diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java index 462f0b07bd6..7d97a7b5a92 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java @@ -31,9 +31,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -/** - * - */ public abstract class BaseNodesResponse extends ActionResponse { private ClusterName clusterName; diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java index cf8190f2c32..ffb43e7ee04 100644 --- 
a/core/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java @@ -24,8 +24,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; -/** - */ public abstract class NodesOperationRequestBuilder, Response extends BaseNodesResponse, RequestBuilder extends NodesOperationRequestBuilder> extends ActionRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 3582f5f5aaf..6cc063d5af1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -106,17 +106,18 @@ public abstract class TransportNodesAction responses = new ArrayList<>(); final List failures = new ArrayList<>(); + final boolean accumulateExceptions = accumulateExceptions(); for (int i = 0; i < nodesResponses.length(); ++i) { Object response = nodesResponses.get(i); - if (nodeResponseClass.isInstance(response)) { - responses.add(nodeResponseClass.cast(response)); - } else if (response instanceof FailedNodeException) { - failures.add((FailedNodeException)response); + if (response instanceof FailedNodeException) { + if (accumulateExceptions) { + failures.add((FailedNodeException)response); + } else { + logger.warn("not accumulating exceptions, excluding exception from response", (FailedNodeException)response); + } } else { - logger.warn("ignoring unexpected response [{}] of type [{}], expected [{}] or [{}]", - response, response != null ? 
response.getClass().getName() : null, - nodeResponseClass.getSimpleName(), FailedNodeException.class.getSimpleName()); + responses.add(nodeResponseClass.cast(response)); } } @@ -243,9 +244,7 @@ public abstract class TransportNodesAction) () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); } - if (accumulateExceptions()) { - responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); - } + responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); if (counter.incrementAndGet() == responses.length()) { finishHim(); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 76d1ff07b88..47284789850 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -112,22 +113,24 @@ public class ReplicationOperation< primaryResult = primary.perform(request); primary.updateLocalCheckpointForShard(primaryRouting.allocationId().getId(), primary.localCheckpoint()); final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); - assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; - if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + if (replicaRequest != null) { + assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; + if (logger.isTraceEnabled()) { + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + } + + // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. + // we have to make sure that every operation indexed into the primary after recovery start will also be replicated + // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. + ClusterState clusterState = clusterStateSupplier.get(); + final List shards = getShards(primaryId, clusterState); + Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); + + markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); + + performOnReplicas(replicaRequest, shards); } - // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. - // we have to make sure that every operation indexed into the primary after recovery start will also be replicated - // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
- ClusterState clusterState = clusterStateSupplier.get(); - final List shards = getShards(primaryId, clusterState); - Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); - - markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); - - performOnReplicas(replicaRequest, shards); - successfulShards.incrementAndGet(); // mark primary as successful decPendingAndFinishIfNeeded(); } @@ -445,7 +448,11 @@ public class ReplicationOperation< public interface PrimaryResult> { - R replicaRequest(); + /** + * @return null if no operation needs to be sent to a replica + * (for example when the operation failed on the primary due to a parsing exception) + */ + @Nullable R replicaRequest(); void setShardInfo(ReplicationResponse.ShardInfo shardInfo); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java index 6c25882d141..4ef20fcb15b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java @@ -26,8 +26,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; -/** - */ public abstract class ReplicationRequestBuilder, Response extends ActionResponse, RequestBuilder extends ReplicationRequestBuilder> extends ActionRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index e6ce0a5aad4..98556494191 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -24,7 +24,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -34,6 +33,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Arrays; /** * Base class for write action responses. 
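The nullable replicaRequest() contract introduced above means a primary-side failure can now short-circuit replication entirely. A minimal sketch of an implementation honoring that contract follows; the SketchPrimaryResult name and its generic bound are illustrative assumptions, not code from this patch:

import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.Nullable;

// Sketch: a PrimaryResult that suppresses replication when the primary
// operation failed, matching the @Nullable contract documented above.
class SketchPrimaryResult<R extends ReplicationRequest<R>> implements ReplicationOperation.PrimaryResult<R> {
    private final R replicaRequest; // null means nothing to send to the replicas

    SketchPrimaryResult(@Nullable R replicaRequest) {
        this.replicaRequest = replicaRequest;
    }

    @Override
    @Nullable
    public R replicaRequest() {
        return replicaRequest; // ReplicationOperation null-checks this before fanning out
    }

    @Override
    public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
        // only meaningful when a response exists to attach the shard info to
    }
}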
@@ -162,7 +162,11 @@ public class ReplicationResponse extends ActionResponse { @Override public String toString() { - return Strings.toString(this); + return "ShardInfo{" + + "total=" + total + + ", successful=" + successful + + ", failures=" + Arrays.toString(failures) + + '}'; } public static ShardInfo readShardInfo(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 0dc482ab923..024672922fa 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -94,16 +94,16 @@ public abstract class TransportReplicationAction< Response extends ReplicationResponse > extends TransportAction { - protected final TransportService transportService; + private final TransportService transportService; protected final ClusterService clusterService; - protected final IndicesService indicesService; + private final IndicesService indicesService; private final ShardStateAction shardStateAction; private final TransportRequestOptions transportOptions; private final String executor; // package private for testing - final String transportReplicaAction; - final String transportPrimaryAction; + private final String transportReplicaAction; + private final String transportPrimaryAction; private final ReplicasProxy replicasProxy; protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService, @@ -168,14 +168,18 @@ public abstract class TransportReplicationAction< * Primary operation on node with primary copy. * * @param shardRequest the request to the primary shard + * @param primary the primary shard to perform the operation on */ - protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard primary) throws Exception; /** * Synchronous replica operation on nodes with replica copies. 
This is done under the lock form - * {@link #acquireReplicaOperationLock(ShardId, long, String, ActionListener)} + * {@link IndexShard#acquireReplicaOperationLock(long, ActionListener, String)} + * + * @param shardRequest the request to the replica shard + * @param replica the replica shard to perform the operation on */ - protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest); + protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica) throws Exception; /** * Cluster level block to check before request execution @@ -204,7 +208,7 @@ public abstract class TransportReplicationAction< protected boolean retryPrimaryException(final Throwable e) { return e.getClass() == ReplicationOperation.RetryOnPrimaryException.class - || TransportActions.isShardNotAvailableException(e); + || TransportActions.isShardNotAvailableException(e); } class OperationTransportHandler implements TransportRequestHandler { @@ -307,17 +311,10 @@ public abstract class TransportReplicationAction< final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex()); final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings()); final ActionListener listener = createResponseListener(primaryShardReference); - createReplicatedOperation(request, new ActionListener() { - @Override - public void onResponse(PrimaryResult result) { - result.respond(listener); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }, primaryShardReference, executeOnReplicas).execute(); + createReplicatedOperation(request, + ActionListener.wrap(result -> result.respond(listener), listener::onFailure), + primaryShardReference, executeOnReplicas) + .execute(); } } catch (Exception e) { Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller @@ -373,11 +370,24 @@ public abstract class TransportReplicationAction< protected class PrimaryResult implements ReplicationOperation.PrimaryResult { final ReplicaRequest replicaRequest; - final Response finalResponse; + final Response finalResponseIfSuccessful; + final Exception finalFailure; - public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + /** + * Result of executing a primary operation + * expects finalResponseIfSuccessful or finalFailure to be not-null + */ + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponseIfSuccessful, Exception finalFailure) { + assert finalFailure != null ^ finalResponseIfSuccessful != null + : "either a response or a failure has to be not null, " + + "found [" + finalFailure + "] failure and ["+ finalResponseIfSuccessful + "] response"; this.replicaRequest = replicaRequest; - this.finalResponse = finalResponse; + this.finalResponseIfSuccessful = finalResponseIfSuccessful; + this.finalFailure = finalFailure; + } + + public PrimaryResult(ReplicaRequest replicaRequest, Response replicationResponse) { + this(replicaRequest, replicationResponse, null); } @Override @@ -387,22 +397,37 @@ public abstract class TransportReplicationAction< @Override public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { - finalResponse.setShardInfo(shardInfo); + if (finalResponseIfSuccessful != null) { + finalResponseIfSuccessful.setShardInfo(shardInfo); + } } public void respond(ActionListener listener) { - listener.onResponse(finalResponse); + if (finalResponseIfSuccessful != 
null) { + listener.onResponse(finalResponseIfSuccessful); + } else { + listener.onFailure(finalFailure); + } } } protected class ReplicaResult { - /** - * Public constructor so subclasses can call it. - */ - public ReplicaResult() {} + final Exception finalFailure; + + public ReplicaResult(Exception finalFailure) { + this.finalFailure = finalFailure; + } + + public ReplicaResult() { + this(null); + } public void respond(ActionListener listener) { - listener.onResponse(Empty.INSTANCE); + if (finalFailure == null) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } else { + listener.onFailure(finalFailure); + } } } @@ -432,11 +457,12 @@ public abstract class TransportReplicationAction< } } - private final class AsyncReplicaAction extends AbstractRunnable implements ActionListener { + private final class AsyncReplicaAction extends AbstractRunnable implements ActionListener { private final ReplicaRequest request; // allocation id of the replica this request is meant for private final String targetAllocationID; private final TransportChannel channel; + private final IndexShard replica; /** * The task on the node with the replica shard. */ @@ -450,18 +476,21 @@ public abstract class TransportReplicationAction< this.channel = channel; this.task = task; this.targetAllocationID = targetAllocationID; + final ShardId shardId = request.shardId(); + assert shardId != null : "request shardId must be set"; + this.replica = getIndexShard(shardId); } @Override - public void onResponse(ShardReference replica) { + public void onResponse(Releasable releasable) { try { - ReplicaResult replicaResult = shardOperationOnReplica(request); - replica.close(); // release shard operation lock before responding to caller + ReplicaResult replicaResult = shardOperationOnReplica(request, replica); + releasable.close(); // release shard operation lock before responding to caller final TransportReplicationAction.ReplicaResponse response = - new ReplicaResponse(replica.routingEntry().allocationId().getId(), replica.getLocalCheckpoint()); + new ReplicaResponse(replica.routingEntry().allocationId().getId(), replica.getLocalCheckpoint()); replicaResult.respond(new ResponseListener(response)); } catch (Exception e) { - Releasables.closeWhileHandlingException(replica); // release shard operation lock before responding to caller + Releasables.closeWhileHandlingException(releasable); // release shard operation lock before responding to caller AsyncReplicaAction.this.onFailure(e); } } @@ -476,6 +505,7 @@ public abstract class TransportReplicationAction< transportReplicaAction, request), e); + request.onRetry(); final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext(); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override @@ -524,8 +554,12 @@ public abstract class TransportReplicationAction< @Override protected void doRun() throws Exception { setPhase(task, "replica"); - assert request.shardId() != null : "request shardId must be set"; - acquireReplicaOperationLock(request.shardId(), request.primaryTerm(), targetAllocationID, this); + final String actualAllocationId = this.replica.routingEntry().allocationId().getId(); + if (actualAllocationId.equals(targetAllocationID) == false) { + throw new ShardNotFoundException(this.replica.shardId(), "expected aID [{}] but found [{}]", targetAllocationID, + actualAllocationId); + } + replica.acquireReplicaOperationLock(request.primaryTerm, this, executor); } /** @@ -559,6 +593,11 @@ public abstract class 
TransportReplicationAction< } } + private IndexShard getIndexShard(ShardId shardId) { + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + return indexService.getShard(shardId.id()); + } + /** * Responsible for routing and retrying failed operations on the primary. * The actual primary operation is done in {@link ReplicationOperation} on the @@ -591,7 +630,7 @@ public abstract class TransportReplicationAction< @Override protected void doRun() { setPhase(task, "routing"); - final ClusterState state = observer.observedState(); + final ClusterState state = observer.observedState().getClusterState(); if (handleBlockExceptions(state)) { return; } @@ -822,13 +861,12 @@ public abstract class TransportReplicationAction< } /** - * tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally + * Tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally * and replication of the operation to all replica shards is completed / failed (see {@link ReplicationOperation}). */ - protected void acquirePrimaryShardReference(ShardId shardId, String allocationId, - ActionListener onReferenceAcquired) { - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); + private void acquirePrimaryShardReference(ShardId shardId, String allocationId, + ActionListener onReferenceAcquired) { + IndexShard indexShard = getIndexShard(shardId); // we may end up here if the cluster state used to route the primary is so stale that the underlying // index shard was replaced with a replica. For example - in a two node cluster, if the primary fails // the replica will take over and a replica will be assigned to the first node. @@ -856,37 +894,6 @@ public abstract class TransportReplicationAction< indexShard.acquirePrimaryOperationLock(onAcquired, executor); } - /** - * tries to acquire an operation on replicas. The lock is closed as soon as replication is completed on the node. - */ - protected void acquireReplicaOperationLock( - ShardId shardId, - long primaryTerm, - final String allocationId, - ActionListener onLockAcquired) { - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); - - final String actualAllocationId = indexShard.routingEntry().allocationId().getId(); - if (actualAllocationId.equals(allocationId) == false) { - throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId); - } - - final ActionListener onAcquired = new ActionListener() { - @Override - public void onResponse(Releasable releasable) { - onLockAcquired.onResponse(new ShardReference(indexShard, releasable)); - } - - @Override - public void onFailure(Exception e) { - onLockAcquired.onFailure(e); - } - }; - - indexShard.acquireReplicaOperationLock(primaryTerm, onAcquired, executor); - } - /** * Indicated whether this operation should be replicated to shadow replicas or not. If this method returns true the replication phase * will be skipped. For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do. 
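With the replica lock helper folded into AsyncReplicaAction and the resolved IndexShard now injected into the operation hooks, a subclass under the new signatures reduces to roughly the following. This is a sketch loosely modeled on the shard flush action, not code from this patch:

// Sketch: the framework resolves the shard (via the getIndexShard helper above)
// and injects it, so the operation no longer reaches into IndicesService.
@Override
protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest, IndexShard primary) {
    primary.flush(shardRequest.getRequest());
    // success path: a response and a null failure (see the PrimaryResult constructors above)
    return new PrimaryResult(shardRequest, new ReplicationResponse());
}

@Override
protected ReplicaResult shardOperationOnReplica(ShardFlushRequest shardRequest, IndexShard replica) {
    replica.flush(shardRequest.getRequest());
    return new ReplicaResult(); // no failure to report
}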
@@ -941,8 +948,10 @@ public abstract class TransportReplicationAction< @Override public PrimaryResult perform(Request request) throws Exception { - PrimaryResult result = shardOperationOnPrimary(request); - result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); + PrimaryResult result = shardOperationOnPrimary(request, indexShard); + if (result.replicaRequest() != null) { + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); + } return result; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index bf2b3235b11..15f269c46f5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -29,9 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; @@ -46,92 +44,63 @@ import java.util.function.Supplier; /** * Base class for transport actions that modify data in some shard like index, delete, and shardBulk. + * Allows performing async actions (e.g. refresh) after performing write operations on primary and replica shards */ public abstract class TransportWriteAction< Request extends ReplicatedWriteRequest, + ReplicaRequest extends ReplicatedWriteRequest, Response extends ReplicationResponse & WriteResponse - > extends TransportReplicationAction { + > extends TransportReplicationAction { protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - String executor) { + Supplier replicaRequest, String executor) { super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, request, request, executor); + indexNameExpressionResolver, request, replicaRequest, executor); } /** - * Called on the primary with a reference to the {@linkplain IndexShard} to modify. - */ - protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; - - /** - * Called once per replica with a reference to the {@linkplain IndexShard} to modify. + * Called on the primary with a reference to the primary {@linkplain IndexShard} to modify. 
* - * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred + * @return the result of the operation on primary, including current translog location and operation response and failure + * async refresh is performed on the primary shard according to the Request refresh policy */ - protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); - @Override - protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception { - IndexShard indexShard = indexShard(request); - WriteResult result = onPrimaryShard(request, indexShard); - return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard); - } - - @Override - protected final WriteReplicaResult shardOperationOnReplica(Request request) { - IndexShard indexShard = indexShard(request); - Translog.Location location = onReplicaShard(request, indexShard); - return new WriteReplicaResult(indexShard, request, location); - } + protected abstract WritePrimaryResult shardOperationOnPrimary(Request request, IndexShard primary) throws Exception; /** - * Fetch the IndexShard for the request. Protected so it can be mocked in tests. + * Called once per replica with a reference to the replica {@linkplain IndexShard} to modify. + * + * @return the result of the operation on replica, including current translog location and operation response and failure + * async refresh is performed on the replica shard according to the ReplicaRequest refresh policy */ - protected IndexShard indexShard(Request request) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - return indexService.getShard(shardId.id()); - } - - /** - * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. - */ - public static class WriteResult { - private final Response response; - private final Translog.Location location; - - public WriteResult(Response response, @Nullable Location location) { - this.response = response; - this.location = location; - } - - public Response getResponse() { - return response; - } - - public Translog.Location getLocation() { - return location; - } - } + @Override + protected abstract WriteReplicaResult shardOperationOnReplica(ReplicaRequest request, IndexShard replica) throws Exception; /** * Result of taking the action on the primary. */ - class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + protected class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { boolean finishedAsyncActions; ActionListener listener = null; - public WritePrimaryResult(Request request, Response finalResponse, - @Nullable Translog.Location location, - IndexShard indexShard) { - super(request, finalResponse); - /* - * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the - * refresh in parallel on the primary and on the replica. 
- */ - new AsyncAfterWriteAction(indexShard, request, location, this, logger).run(); + public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse, + @Nullable Location location, @Nullable Exception operationFailure, + IndexShard primary) { + super(request, finalResponse, operationFailure); + assert location == null || operationFailure == null + : "expected either failure to be null or translog location to be null, " + + "but found: [" + location + "] translog location and [" + operationFailure + "] failure"; + if (operationFailure != null) { + this.finishedAsyncActions = true; + } else { + /* + * We call this before replication because this might wait for a refresh and that can take a while. + * This way we wait for the refresh in parallel on the primary and on the replica. + */ + new AsyncAfterWriteAction(primary, request, location, this, logger).run(); + } } @Override @@ -160,7 +129,7 @@ public abstract class TransportWriteAction< @Override public synchronized void onSuccess(boolean forcedRefresh) { - finalResponse.setForcedRefresh(forcedRefresh); + finalResponseIfSuccessful.setForcedRefresh(forcedRefresh); finishedAsyncActions = true; respondIfPossible(null); } @@ -169,12 +138,18 @@ public abstract class TransportWriteAction< /** * Result of taking the action on the replica. */ - class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { + protected class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { boolean finishedAsyncActions; private ActionListener listener; - public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - new AsyncAfterWriteAction(indexShard, request, location, this, logger).run(); + public WriteReplicaResult(ReplicaRequest request, @Nullable Location location, + @Nullable Exception operationFailure, IndexShard replica) { + super(operationFailure); + if (operationFailure != null) { + this.finishedAsyncActions = true; + } else { + new AsyncAfterWriteAction(replica, request, location, this, logger).run(); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index cb9a6ab9f69..a6bb0f8e0a1 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -32,9 +32,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.concurrent.TimeUnit; -/** - * - */ public abstract class InstanceShardOperationRequest> extends ActionRequest implements IndicesRequest { diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java index 13266b9151d..7bec08ce9ec 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java @@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; -/** - */ public abstract class 
InstanceShardOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends InstanceShardOperationRequestBuilder> extends ActionRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 81da5ec9a86..c5014adf570 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -49,9 +49,6 @@ import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; -/** - * - */ public abstract class TransportInstanceSingleOperationAction, Response extends ActionResponse> extends HandledTransportAction { protected final ClusterService clusterService; @@ -127,9 +124,10 @@ public abstract class TransportInstanceSingleOperationAction, Response extends ActionResponse, RequestBuilder extends SingleShardOperationRequestBuilder> extends ActionRequestBuilder { diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java index 499932fce68..4265fa98337 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java @@ -31,9 +31,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public abstract class SingleShardRequest> extends ActionRequest implements IndicesRequest { public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 8981caa60f7..811dcbed3dc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -46,6 +46,7 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.function.Supplier; import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; @@ -94,7 +95,7 @@ public abstract class TransportSingleShardAction> extends public static final String[] ALL_NODES = Strings.EMPTY_ARRAY; - private String[] nodesIds = ALL_NODES; + private String[] nodes = ALL_NODES; private TimeValue timeout; @@ -58,7 +58,7 @@ public class BaseTasksRequest> extends @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (taskId.isSet() && nodesIds.length > 0) { + if (taskId.isSet() && nodes.length > 0) { validationException = addValidationError("task id cannot be used together with node ids", validationException); } @@ -81,13 +81,13 @@ public class BaseTasksRequest> extends return actions; } - public final String[] getNodesIds() { - return nodesIds; + public final String[] getNodes() { + return nodes; } 
@SuppressWarnings("unchecked") - public final Request setNodesIds(String... nodesIds) { - this.nodesIds = nodesIds; + public final Request setNodes(String... nodes) { + this.nodes = nodes; return (Request) this; } @@ -142,7 +142,7 @@ public class BaseTasksRequest> extends super.readFrom(in); taskId = TaskId.readFromStream(in); parentTaskId = TaskId.readFromStream(in); - nodesIds = in.readStringArray(); + nodes = in.readStringArray(); actions = in.readStringArray(); timeout = in.readOptionalWriteable(TimeValue::new); } @@ -152,7 +152,7 @@ public class BaseTasksRequest> extends super.writeTo(out); taskId.writeTo(out); parentTaskId.writeTo(out); - out.writeStringArrayNullable(nodesIds); + out.writeStringArrayNullable(nodes); out.writeStringArrayNullable(actions); out.writeOptionalWriteable(timeout); } diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java index 43be2b46db1..b62cfd714bb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java @@ -19,16 +19,22 @@ package org.elasticsearch.action.support.tasks; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.stream.Stream; + +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.ExceptionsHelper.rethrowAndSuppress; /** @@ -60,6 +66,17 @@ public class BaseTasksResponse extends ActionResponse { return nodeFailures; } + /** + * Rethrow task failures if there are any. + */ + public void rethrowFailures(String operationName) { + rethrowAndSuppress(Stream.concat( + getNodeFailures().stream(), + getTaskFailures().stream().map(f -> new ElasticsearchException( + "{} of [{}] failed", f.getCause(), operationName, new TaskId(f.getNodeId(), f.getTaskId())))) + .collect(toList())); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java index a3528cb75c4..656dae99928 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java @@ -48,7 +48,7 @@ public class TasksRequestBuilder< @SuppressWarnings("unchecked") public final RequestBuilder setNodesIds(String... nodesIds) { - request.setNodesIds(nodesIds); + request.setNodes(nodesIds); return (RequestBuilder) this; } @@ -63,5 +63,14 @@ public class TasksRequestBuilder< request.setTimeout(timeout); return (RequestBuilder) this; } + + /** + * Match all children of the provided task. 
+ */ + @SuppressWarnings("unchecked") + public final RequestBuilder setParentTaskId(TaskId taskId) { + request.setParentTaskId(taskId); + return (RequestBuilder) this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index 6752ccd7293..ee384b819b0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -33,10 +33,12 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.NodeShouldNotConnectException; @@ -57,6 +59,8 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.Consumer; import java.util.function.Supplier; +import static java.util.Collections.emptyList; + /** * The base class for transport actions that are interacting with currently running tasks. */ @@ -100,21 +104,56 @@ public abstract class TransportTasksAction< new AsyncAction(task, request, listener).start(); } - private NodeTasksResponse nodeOperation(NodeTaskRequest nodeTaskRequest) { + private void nodeOperation(NodeTaskRequest nodeTaskRequest, ActionListener listener) { TasksRequest request = nodeTaskRequest.tasksRequest; - List results = new ArrayList<>(); - List exceptions = new ArrayList<>(); - processTasks(request, task -> { - try { - TaskResponse response = taskOperation(request, task); - if (response != null) { - results.add(response); + List tasks = new ArrayList<>(); + processTasks(request, tasks::add); + if (tasks.isEmpty()) { + listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), emptyList(), emptyList())); + return; + } + AtomicArray> responses = new AtomicArray<>(tasks.size()); + final AtomicInteger counter = new AtomicInteger(tasks.size()); + for (int i = 0; i < tasks.size(); i++) { + final int taskIndex = i; + ActionListener taskListener = new ActionListener() { + @Override + public void onResponse(TaskResponse response) { + responses.setOnce(taskIndex, response == null ? 
null : new Tuple<>(response, null)); + respondIfFinished(); } - } catch (Exception ex) { - exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), task.getId(), ex)); + + @Override + public void onFailure(Exception e) { + responses.setOnce(taskIndex, new Tuple<>(null, e)); + respondIfFinished(); + } + + private void respondIfFinished() { + if (counter.decrementAndGet() != 0) { + return; + } + List results = new ArrayList<>(); + List exceptions = new ArrayList<>(); + for (AtomicArray.Entry> response : responses.asList()) { + if (response.value.v1() == null) { + assert response.value.v2() != null; + exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), tasks.get(taskIndex).getId(), + response.value.v2())); + } else { + assert response.value.v2() == null; + results.add(response.value.v1()); + } + } + listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions)); + } + }; + try { + taskOperation(request, tasks.get(taskIndex), taskListener); + } catch (Exception e) { + taskListener.onFailure(e); } - }); - return new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions); + } } protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) { @@ -125,7 +164,7 @@ if (request.getTaskId().isSet()) { return new String[]{request.getTaskId().getNodeId()}; } else { - return clusterState.nodes().resolveNodes(request.getNodesIds()); + return clusterState.nodes().resolveNodes(request.getNodes()); } } @@ -178,7 +217,10 @@ protected abstract TaskResponse readTaskResponse(StreamInput in) throws IOException; - protected abstract TaskResponse taskOperation(TasksRequest request, OperationTask task); + /** + * Perform the required operation on the task. It is OK to start an asynchronous operation or to throw an exception but not both.
+ */ + protected abstract void taskOperation(TasksRequest request, OperationTask task, ActionListener listener); protected boolean transportCompress() { return false; @@ -305,7 +347,27 @@ public abstract class TransportTasksAction< @Override public void messageReceived(final NodeTaskRequest request, final TransportChannel channel) throws Exception { - channel.sendResponse(nodeOperation(request)); + nodeOperation(request, new ActionListener() { + @Override + public void onResponse( + TransportTasksAction.NodeTasksResponse response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + e1.addSuppressed(e); + logger.warn("Failed to send failure", e1); + } + } + }); } } diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java index 9cc328c2be7..a84ba25f5bb 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class MultiTermVectorsAction extends Action { public static final MultiTermVectorsAction INSTANCE = new MultiTermVectorsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index af192cea600..3cd73226e73 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -94,7 +94,7 @@ public class MultiTermVectorsRequest extends ActionRequest 0) { diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java index a4c53ee4a2d..982f7ad52c1 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class TermVectorsAction extends Action { public static final TermVectorsAction INSTANCE = new TermVectorsAction(); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 0ae8824ce8d..534ef4164e2 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -105,13 +105,13 @@ import static org.apache.lucene.util.ArrayUtil.grow; *

 * <li>vint: frequency (always returned)</li>
 * <ul>
- * <li>vint: position_1 (if positions == true)</li>
- * <li>vint: startOffset_1 (if offset == true)</li>
- * <li>vint: endOffset_1 (if offset == true)</li>
- * <li>BytesRef: payload_1 (if payloads == true)</li>
+ * <li>vint: position_1 (if positions)</li>
+ * <li>vint: startOffset_1 (if offset)</li>
+ * <li>vint: endOffset_1 (if offset)</li>
+ * <li>BytesRef: payload_1 (if payloads)</li>
 * <li>...</li>
- * <li>vint: endOffset_freqency (if offset == true)</li>
- * <li>BytesRef: payload_freqency (if payloads == true)</li>
+ * <li>vint: endOffset_freqency (if offset)</li>
+ * <li>BytesRef: payload_freqency (if payloads)</li>
 * </ul>
 */ diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index a660ede0ba8..b83713e3a6a 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.get.MultiGetRequest; @@ -56,7 +55,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; * Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are * required. */ -public class TermVectorsRequest extends SingleShardRequest implements DocumentRequest, RealtimeRequest { +public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { private String type; @@ -200,7 +199,6 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Returns the type of document to get the term vector for. */ - @Override public String type() { return type; } @@ -208,7 +206,6 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Returns the id of document the term vector is requested for. */ - @Override public String id() { return id; } @@ -250,18 +247,15 @@ public class TermVectorsRequest extends SingleShardRequest i /** * @return The routing for this request. */ - @Override public String routing() { return routing; } - @Override public TermVectorsRequest routing(String routing) { this.routing = routing; return this; } - @Override public String parent() { return parent; } diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index b552807c9e1..e7fd323088a 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -174,7 +174,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio final ShardId shardId = request.getShardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexShard indexShard = indexService.getShard(shardId.getId()); - final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); + final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::estimatedTimeInMillis); switch (result.getResponseResult()) { case CREATED: IndexRequest upsertRequest = result.action(); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/UpdateAction.java index 4ac1002dbc6..7d7997a7a0b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class UpdateAction extends Action { public static final UpdateAction INSTANCE = new UpdateAction(); diff --git
a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 49206470532..0e37b6ff064 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; @@ -59,15 +58,14 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.LongSupplier; /** * Helper for translating an update request to an index, delete request or update response. */ public class UpdateHelper extends AbstractComponent { - private final ScriptService scriptService; - @Inject public UpdateHelper(Settings settings, ScriptService scriptService) { super(settings); this.scriptService = scriptService; @@ -76,19 +74,18 @@ public class UpdateHelper extends AbstractComponent { /** * Prepares an update request by converting it into an index or delete request or an update response (no action). */ - @SuppressWarnings("unchecked") - public Result prepare(UpdateRequest request, IndexShard indexShard) { + public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) { final GetResult getResult = indexShard.getService().get(request.type(), request.id(), new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME}, true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE); - return prepare(indexShard.shardId(), request, getResult); + return prepare(indexShard.shardId(), request, getResult, nowInMillis); } /** * Prepares an update request by converting it into an index or delete request or an update response (no action). 
*/ @SuppressWarnings("unchecked") - protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult getResult) { + protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult getResult, LongSupplier nowInMillis) { long getDateNS = System.nanoTime(); if (!getResult.isExists()) { if (request.upsertRequest() == null && !request.docAsUpsert()) { @@ -104,6 +101,7 @@ public class UpdateHelper extends AbstractComponent { // Tell the script that this is a create and not an update ctx.put("op", "create"); ctx.put("_source", upsertDoc); + ctx.put("_now", nowInMillis.getAsLong()); ctx = executeScript(request.script, ctx); //Allow the script to set TTL using ctx._ttl if (ttl == null) { @@ -118,7 +116,7 @@ public class UpdateHelper extends AbstractComponent { if (!"create".equals(scriptOpChoice)) { if (!"none".equals(scriptOpChoice)) { logger.warn("Used upsert operation [{}] for script [{}], doing nothing...", scriptOpChoice, - request.script.getScript()); + request.script.getIdOrCode()); } UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), DocWriteResponse.Result.NOOP); @@ -143,7 +141,12 @@ public class UpdateHelper extends AbstractComponent { return new Result(indexRequest, DocWriteResponse.Result.CREATED, null, null); } - final long updateVersion = getResult.getVersion(); + long updateVersion = getResult.getVersion(); + + if (request.versionType() != VersionType.INTERNAL) { + assert request.versionType() == VersionType.FORCE; + updateVersion = request.version(); // remember, match_any is excluded by the conflict test + } if (getResult.internalSourceRef() == null) { // no source, we can't do anything, throw a failure... @@ -192,6 +195,7 @@ public class UpdateHelper extends AbstractComponent { ctx.put("_timestamp", originalTimestamp); ctx.put("_ttl", originalTtl); ctx.put("_source", sourceAndContent.v2()); + ctx.put("_now", nowInMillis.getAsLong()); ctx = executeScript(request.script, ctx); @@ -238,7 +242,7 @@ public class UpdateHelper extends AbstractComponent { update.setGetResult(extractGetResult(request, request.index(), getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, getResult.internalSourceRef())); return new Result(update, DocWriteResponse.Result.NOOP, updatedSourceAsMap, updateSourceContentType); } else { - logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script.getScript()); + logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script.getIdOrCode()); UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), DocWriteResponse.Result.NOOP); return new Result(update, DocWriteResponse.Result.NOOP, updatedSourceAsMap, updateSourceContentType); } @@ -247,7 +251,7 @@ public class UpdateHelper extends AbstractComponent { private Map<String, Object> executeScript(Script script, Map<String, Object> ctx) { try { if (scriptService != null) { - ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.UPDATE, Collections.emptyMap()); + ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.UPDATE); executableScript.setNextVar("ctx", ctx); executableScript.run(); // we need to unwrap the ctx...
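The hunks above thread a LongSupplier clock from TransportUpdateAction (threadPool::estimatedTimeInMillis) through UpdateHelper.prepare and expose it to update scripts as ctx._now. A minimal, runnable sketch of that wiring, with a plain Map standing in for the script context and System::currentTimeMillis for the thread pool clock (class and method names here are illustrative, not from the patch):

-------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.LongSupplier;

public class NowInScriptContext {
    // The caller injects the clock; in the patch this is threadPool::estimatedTimeInMillis.
    static Map<String, Object> buildScriptContext(LongSupplier nowInMillis) {
        Map<String, Object> ctx = new HashMap<>();
        ctx.put("op", "create");
        ctx.put("_now", nowInMillis.getAsLong());
        return ctx;
    }

    public static void main(String[] args) {
        Map<String, Object> ctx = buildScriptContext(System::currentTimeMillis);
        // An update script can read a stable timestamp instead of sampling the clock itself.
        System.out.println("script sees _now = " + ctx.get("_now"));
    }
}
-------------------------------------

Injecting the clock keeps every timestamp within one update consistent and lets tests pass a fixed supplier instead of the wall clock.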
@@ -308,7 +312,7 @@ public class UpdateHelper extends AbstractComponent { if (request.fetchSource() != null && request.fetchSource().fetchSource()) { sourceRequested = true; if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) { - Object value = sourceLookup.filter(request.fetchSource().includes(), request.fetchSource().excludes()); + Object value = sourceLookup.filter(request.fetchSource()); try { final int initialCapacity = Math.min(1024, sourceAsBytes.length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 9fe5cd892dc..f59fd142e71 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocumentRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; @@ -42,8 +42,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -54,10 +53,8 @@ import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; -/** - */ public class UpdateRequest extends InstanceShardOperationRequest - implements DocumentRequest, WriteRequest { + implements DocWriteRequest, WriteRequest { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(UpdateRequest.class)); @@ -227,14 +224,14 @@ public class UpdateRequest extends InstanceShardOperationRequest */ @Deprecated public String scriptString() { - return this.script == null ? null : this.script.getScript(); + return this.script == null ? null : this.script.getIdOrCode(); } /** * @deprecated Use {@link #script()} instead */ @Deprecated - public ScriptService.ScriptType scriptType() { + public ScriptType scriptType() { return this.script == null ? null : this.script.getType(); } @@ -254,7 +251,7 @@ public class UpdateRequest extends InstanceShardOperationRequest * @deprecated Use {@link #script(Script)} instead */ @Deprecated - public UpdateRequest script(String script, ScriptService.ScriptType scriptType) { + public UpdateRequest script(String script, ScriptType scriptType) { updateOrCreateScript(script, scriptType, null, null); return this; } @@ -330,13 +327,13 @@ public class UpdateRequest extends InstanceShardOperationRequest private void updateOrCreateScript(String scriptContent, ScriptType type, String lang, Map params) { Script script = script(); if (script == null) { - script = new Script(scriptContent == null ? "" : scriptContent, type == null ? ScriptType.INLINE : type, lang, params); + script = new Script(type == null ? ScriptType.INLINE : type, lang, scriptContent == null ? 
"" : scriptContent, params); } else { - String newScriptContent = scriptContent == null ? script.getScript() : scriptContent; + String newScriptContent = scriptContent == null ? script.getIdOrCode() : scriptContent; ScriptType newScriptType = type == null ? script.getType() : type; String newScriptLang = lang == null ? script.getLang() : lang; Map newScriptParams = params == null ? script.getParams() : params; - script = new Script(newScriptContent, newScriptType, newScriptLang, newScriptParams); + script = new Script(newScriptType, newScriptLang, newScriptContent, newScriptParams); } script(script); } @@ -349,8 +346,8 @@ public class UpdateRequest extends InstanceShardOperationRequest * @deprecated Use {@link #script(Script)} instead */ @Deprecated - public UpdateRequest script(String script, ScriptService.ScriptType scriptType, @Nullable Map scriptParams) { - this.script = new Script(script, scriptType, null, scriptParams); + public UpdateRequest script(String script, ScriptType scriptType, @Nullable Map scriptParams) { + this.script = new Script(scriptType, Script.DEFAULT_SCRIPT_LANG, script, scriptParams); return this; } @@ -371,9 +368,9 @@ public class UpdateRequest extends InstanceShardOperationRequest * @deprecated Use {@link #script(Script)} instead */ @Deprecated - public UpdateRequest script(String script, @Nullable String scriptLang, ScriptService.ScriptType scriptType, + public UpdateRequest script(String script, @Nullable String scriptLang, ScriptType scriptType, @Nullable Map scriptParams) { - this.script = new Script(script, scriptType, scriptLang, scriptParams); + this.script = new Script(scriptType, scriptLang, script, scriptParams); return this; } @@ -400,7 +397,8 @@ public class UpdateRequest extends InstanceShardOperationRequest * the returned _source */ public UpdateRequest fetchSource(@Nullable String include, @Nullable String exclude) { - this.fetchSourceContext = new FetchSourceContext(include, exclude); + FetchSourceContext context = this.fetchSourceContext == null ? FetchSourceContext.FETCH_SOURCE : this.fetchSourceContext; + this.fetchSourceContext = new FetchSourceContext(context.fetchSource(), new String[] {include}, new String[]{exclude}); return this; } @@ -417,7 +415,8 @@ public class UpdateRequest extends InstanceShardOperationRequest * filter the returned _source */ public UpdateRequest fetchSource(@Nullable String[] includes, @Nullable String[] excludes) { - this.fetchSourceContext = new FetchSourceContext(includes, excludes); + FetchSourceContext context = this.fetchSourceContext == null ? FetchSourceContext.FETCH_SOURCE : this.fetchSourceContext; + this.fetchSourceContext = new FetchSourceContext(context.fetchSource(), includes, excludes); return this; } @@ -425,7 +424,8 @@ public class UpdateRequest extends InstanceShardOperationRequest * Indicates whether the response should contain the updated _source. */ public UpdateRequest fetchSource(boolean fetchSource) { - this.fetchSourceContext = new FetchSourceContext(fetchSource); + FetchSourceContext context = this.fetchSourceContext == null ? FetchSourceContext.FETCH_SOURCE : this.fetchSourceContext; + this.fetchSourceContext = new FetchSourceContext(fetchSource, context.includes(), context.excludes()); return this; } @@ -468,31 +468,33 @@ public class UpdateRequest extends InstanceShardOperationRequest return this.retryOnConflict; } - /** - * Sets the version, which will cause the index operation to only be performed if a matching - * version exists and no changes happened on the doc since then. 
- */ + @Override public UpdateRequest version(long version) { this.version = version; return this; } + @Override public long version() { return this.version; } - /** - * Sets the versioning type. Defaults to {@link VersionType#INTERNAL}. - */ + @Override public UpdateRequest versionType(VersionType versionType) { this.versionType = versionType; return this; } + @Override public VersionType versionType() { return this.versionType; } + @Override + public OpType opType() { + return OpType.UPDATE; + } + @Override public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { this.refreshPolicy = refreshPolicy; @@ -715,7 +717,7 @@ public class UpdateRequest extends InstanceShardOperationRequest return detectNoop; } - public UpdateRequest fromXContent(BytesReference source) throws Exception { + public UpdateRequest fromXContent(BytesReference source) throws IOException { Script script = null; try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) { XContentParser.Token token = parser.nextToken(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 8da3d6b658c..022f7a3c4de 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -271,9 +271,6 @@ final class Bootstrap { closeSystOut(); } - // fail if using broken version - JVMCheck.check(); - // fail if somebody replaced the lucene jars checkLucene(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index de80b487c7e..06c334fd42a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -43,13 +43,13 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.function.Predicate; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** - * We enforce limits once any network host is configured. In this case we assume the node is running in production - * and all production limit checks must pass. This should be extended as we go to settings like: - * - discovery.zen.ping.unicast.hosts is set if we use zen disco - * - ensure we can write in all data directories - * - fail if the default cluster.name is used, if this is setup on network a real clustername should be used? + * We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface. In this case we assume the node is + * running in production and all bootstrap checks must pass. */ final class BootstrapCheck { @@ -57,8 +57,7 @@ final class BootstrapCheck { } /** - * checks the current limits against the snapshot or release build - * checks + * Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface. * * @param settings the current node settings * @param boundTransportAddress the node network bindings @@ -71,15 +70,12 @@ final class BootstrapCheck { } /** - * executes the provided checks and fails the node if - * enforceLimits is true, otherwise logs warnings + * Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings. 
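Backing up to the UpdateRequest hunks above: the three fetchSource(...) overloads now merge with any previously configured FetchSourceContext instead of replacing it wholesale, so setting includes/excludes preserves the fetch flag and toggling the flag preserves the filters. A self-contained sketch of that merge rule, with FetchContext as an illustrative stand-in for FetchSourceContext:

-------------------------------------
public class FetchSourceMerge {
    static final class FetchContext {
        final boolean fetch;
        final String[] includes;
        final String[] excludes;

        FetchContext(boolean fetch, String[] includes, String[] excludes) {
            this.fetch = fetch;
            this.includes = includes;
            this.excludes = excludes;
        }
    }

    static final FetchContext DEFAULT = new FetchContext(true, new String[0], new String[0]);

    // Mirrors fetchSource(includes, excludes): keep the existing flag, replace only the filters.
    static FetchContext withFilters(FetchContext current, String[] includes, String[] excludes) {
        FetchContext base = current == null ? DEFAULT : current;
        return new FetchContext(base.fetch, includes, excludes);
    }

    // Mirrors fetchSource(boolean): keep the existing filters, replace only the flag.
    static FetchContext withFlag(FetchContext current, boolean fetch) {
        FetchContext base = current == null ? DEFAULT : current;
        return new FetchContext(fetch, base.includes, base.excludes);
    }

    public static void main(String[] args) {
        FetchContext ctx = withFilters(null, new String[] {"user.*"}, new String[0]);
        ctx = withFlag(ctx, true); // the filters survive the flag toggle
        System.out.println(ctx.includes[0]);
    }
}
-------------------------------------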
* - * @param enforceLimits true if the checks should be enforced or - * otherwise warned - * @param checks the checks to execute - * @param nodeName the node name to be used as a logging prefix + * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned + * @param checks the checks to execute + * @param nodeName the node name to be used as a logging prefix */ - // visible for testing static void check( final boolean enforceLimits, final List checks, @@ -88,13 +84,11 @@ final class BootstrapCheck { } /** - * executes the provided checks and fails the node if - * enforceLimits is true, otherwise logs warnings + * Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings. * - * @param enforceLimits true if the checks should be enforced or - * otherwise warned - * @param checks the checks to execute - * @param logger the logger to + * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned + * @param checks the checks to execute + * @param logger the logger to */ static void check( final boolean enforceLimits, @@ -137,15 +131,16 @@ final class BootstrapCheck { } /** - * Tests if the checks should be enforced + * Tests if the checks should be enforced. * * @param boundTransportAddress the node network bindings - * @return true if the checks should be enforced + * @return {@code true} if the checks should be enforced */ - // visible for testing static boolean enforceLimits(BoundTransportAddress boundTransportAddress) { - return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(TransportAddress::isLoopbackOrLinkLocalAddress) && - boundTransportAddress.publishAddress().isLoopbackOrLinkLocalAddress()); + Predicate isLoopbackOrLinkLocalAddress = t -> t.address().getAddress().isLinkLocalAddress() + || t.address().getAddress().isLoopbackAddress(); + return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) && + isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress())); } // the list of checks to execute @@ -169,23 +164,24 @@ final class BootstrapCheck { checks.add(new UseSerialGCCheck()); checks.add(new OnErrorCheck()); checks.add(new OnOutOfMemoryErrorCheck()); + checks.add(new G1GCCheck()); return Collections.unmodifiableList(checks); } /** - * Encapsulates a limit check + * Encapsulates a bootstrap check. */ interface Check { /** - * test if the node fails the check + * Test if the node fails the check. * - * @return true if the node failed the check + * @return {@code true} if the node failed the check */ boolean check(); /** - * the message for a failed check + * The error message for a failed check. 
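The enforceLimits rewrite above spells the loopback/link-local test out as an explicit Predicate over the underlying InetAddress. A standalone sketch of the enforcement rule, using plain InetAddress arrays in place of BoundTransportAddress (the helper names are illustrative):

-------------------------------------
import java.net.InetAddress;
import java.util.Arrays;
import java.util.function.Predicate;

public class EnforceLimitsSketch {
    // Checks are enforced as soon as any bound or published address is a real
    // (non-loopback, non-link-local) interface, i.e. a production binding.
    static boolean enforceLimits(InetAddress[] bound, InetAddress publish) {
        Predicate<InetAddress> isLoopbackOrLinkLocal =
            a -> a.isLinkLocalAddress() || a.isLoopbackAddress();
        return !(Arrays.stream(bound).allMatch(isLoopbackOrLinkLocal)
            && isLoopbackOrLinkLocal.test(publish));
    }

    public static void main(String[] args) throws Exception {
        InetAddress loopback = InetAddress.getLoopbackAddress();
        InetAddress site = InetAddress.getByName("10.0.0.1"); // literal address, no DNS lookup
        System.out.println(enforceLimits(new InetAddress[] {loopback}, loopback));       // false
        System.out.println(enforceLimits(new InetAddress[] {loopback, site}, loopback)); // true
    }
}
-------------------------------------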
* * @return the error message on check failure */ @@ -265,7 +261,7 @@ final class BootstrapCheck { public final String errorMessage() { return String.format( Locale.ROOT, - "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]", + "max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]", getMaxFileDescriptorCount(), limit ); @@ -305,7 +301,8 @@ final class BootstrapCheck { static class MaxNumberOfThreadsCheck implements Check { - private final long maxNumberOfThreadsThreshold = 1 << 11; + // this should be plenty for machines up to 256 cores + private final long maxNumberOfThreadsThreshold = 1 << 12; @Override public boolean check() { @@ -316,7 +313,7 @@ final class BootstrapCheck { public String errorMessage() { return String.format( Locale.ROOT, - "max number of threads [%d] for user [%s] likely too low, increase to at least [%d]", + "max number of threads [%d] for user [%s] is too low, increase to at least [%d]", getMaxNumberOfThreads(), BootstrapInfo.getSystemProperties().get("user.name"), maxNumberOfThreadsThreshold); @@ -340,7 +337,7 @@ final class BootstrapCheck { public String errorMessage() { return String.format( Locale.ROOT, - "max size virtual memory [%d] for user [%s] likely too low, increase to [unlimited]", + "max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]", getMaxSizeVirtualMemory(), BootstrapInfo.getSystemProperties().get("user.name")); } @@ -370,7 +367,7 @@ final class BootstrapCheck { public String errorMessage() { return String.format( Locale.ROOT, - "max virtual memory areas vm.max_map_count [%d] likely too low, increase to at least [%d]", + "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", getMaxMapCount(), limit); } @@ -545,4 +542,59 @@ final class BootstrapCheck { } + /** + * Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled. 
+ */ + static class G1GCCheck implements BootstrapCheck.Check { + + @Override + public boolean check() { + if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) { + final String jvmVersion = jvmVersion(); + // HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223 + final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+"); + final Matcher matcher = pattern.matcher(jvmVersion); + final boolean matches = matcher.matches(); + assert matches : jvmVersion; + final int major = Integer.parseInt(matcher.group(1)); + final int update = Integer.parseInt(matcher.group(2)); + // HotSpot versions for Java 8 have major version 25, the bad versions are all versions prior to update 40 + return major == 25 && update < 40; + } else { + return false; + } + } + + // visible for testing + String jvmVendor() { + return Constants.JVM_VENDOR; + } + + // visible for testing + boolean isG1GCEnabled() { + assert "Oracle Corporation".equals(jvmVendor()); + return JvmInfo.jvmInfo().useG1GC().equals("true"); + } + + // visible for testing + String jvmVersion() { + assert "Oracle Corporation".equals(jvmVendor()); + return Constants.JVM_VERSION; + } + + // visible for testing + boolean isJava8() { + assert "Oracle Corporation".equals(jvmVendor()); + return JavaVersion.current().equals(JavaVersion.parse("1.8")); + } + + @Override + public String errorMessage() { + return String.format( + Locale.ROOT, + "JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion()); + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java deleted file mode 100644 index c367b38a79b..00000000000 --- a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
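The new G1GCCheck above decides whether the running JVM is affected by parsing the HotSpot version string: Java 8 HotSpot reports major version 25, and every update before 8u40 carries the G1GC corruption bug (the deleted JVMCheck below pinned individual 25.x-bNN versions instead). A sketch of that parse against sample version strings:

-------------------------------------
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class G1GCVersionParse {
    // Java 8 HotSpot versions look like "25.31-b07"; JEP 223 changes the scheme in Java 9.
    private static final Pattern HOTSPOT = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");

    static boolean isAffected(String jvmVersion) {
        Matcher matcher = HOTSPOT.matcher(jvmVersion);
        if (matcher.matches() == false) {
            return false; // not a Java 8 HotSpot version string
        }
        int major = Integer.parseInt(matcher.group(1));
        int update = Integer.parseInt(matcher.group(2));
        return major == 25 && update < 40; // everything before 8u40 is affected
    }

    public static void main(String[] args) {
        System.out.println(isAffected("25.31-b07")); // true, pre-8u40
        System.out.println(isAffected("25.45-b02")); // false, 8u45
    }
}
-------------------------------------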
- */ - -package org.elasticsearch.bootstrap; - -import org.apache.lucene.util.Constants; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.monitor.jvm.JvmInfo; - -import java.lang.management.ManagementFactory; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; - -/** Checks that the JVM is ok and won't cause index corruption */ -final class JVMCheck { - /** no instantiation */ - private JVMCheck() {} - - /** - * URL with latest JVM recommendations - */ - static final String JVM_RECOMMENDATIONS = "http://www.elastic.co/guide/en/elasticsearch/reference/current/_installation.html"; - - /** - * System property which if set causes us to bypass the check completely (but issues a warning in doing so) - */ - static final String JVM_BYPASS = "es.bypass.vm.check"; - - /** - * Metadata and messaging for checking and reporting HotSpot - * issues. - */ - interface HotSpotCheck { - /** - * If this HotSpot check should be executed. - * - * @return true if this HotSpot check should be executed - */ - boolean check(); - - /** - * The error message to display when this HotSpot issue is - * present. - * - * @return the error message for this HotSpot issue - */ - String getErrorMessage(); - - /** - * The warning message for this HotSpot issue if a workaround - * exists and is used. - * - * @return the warning message for this HotSpot issue - */ - Optional getWarningMessage(); - - /** - * The workaround for this HotSpot issue, if one exists. - * - * @return the workaround for this HotSpot issue, if one exists - */ - Optional getWorkaround(); - } - - /** - * Metadata and messaging for hotspot bugs. - */ - static class HotspotBug implements HotSpotCheck { - - /** OpenJDK bug URL */ - final String bugUrl; - - /** Compiler workaround flag (null if there is no workaround) */ - final String workAround; - - HotspotBug(String bugUrl, String workAround) { - this.bugUrl = bugUrl; - this.workAround = workAround; - } - - /** Returns an error message to the user for a broken version */ - public String getErrorMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Java version: ").append(fullVersion()); - sb.append(" suffers from critical bug ").append(bugUrl); - sb.append(" which can cause data corruption."); - sb.append(System.lineSeparator()); - sb.append("Please upgrade the JVM, see ").append(JVM_RECOMMENDATIONS); - sb.append(" for current recommendations."); - if (workAround != null) { - sb.append(System.lineSeparator()); - sb.append("If you absolutely cannot upgrade, please add ").append(workAround); - sb.append(" to the ES_JAVA_OPTS environment variable."); - sb.append(System.lineSeparator()); - sb.append("Upgrading is preferred, this workaround will result in degraded performance."); - } - return sb.toString(); - } - - /** Warns the user when a workaround is being used to dodge the bug */ - public Optional getWarningMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Workaround flag ").append(workAround); - sb.append(" for bug ").append(bugUrl); - sb.append(" found. 
"); - sb.append(System.lineSeparator()); - sb.append("This will result in degraded performance!"); - sb.append(System.lineSeparator()); - sb.append("Upgrading is preferred, see ").append(JVM_RECOMMENDATIONS); - sb.append(" for current recommendations."); - return Optional.of(sb.toString()); - } - - public boolean check() { - return true; - } - - @Override - public Optional getWorkaround() { - return Optional.of(workAround); - } - } - - static class G1GCCheck implements HotSpotCheck { - @Override - public boolean check() { - return JvmInfo.jvmInfo().useG1GC().equals("true"); - } - - /** Returns an error message to the user for a broken version */ - public String getErrorMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Java version: ").append(fullVersion()); - sb.append(" can cause data corruption"); - sb.append(" when used with G1GC."); - sb.append(System.lineSeparator()); - sb.append("Please upgrade the JVM, see ").append(JVM_RECOMMENDATIONS); - sb.append(" for current recommendations."); - return sb.toString(); - } - - @Override - public Optional getWarningMessage() { - return Optional.empty(); - } - - @Override - public Optional getWorkaround() { - return Optional.empty(); - } - } - - /** mapping of hotspot version to hotspot bug information for the most serious bugs */ - static final Map JVM_BROKEN_HOTSPOT_VERSIONS; - - static { - Map bugs = new HashMap<>(); - - // 1.7.0: loop optimizer bug - bugs.put("21.0-b17", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-7070134", "-XX:-UseLoopPredicate")); - // register allocation issues (technically only x86/amd64). This impacted update 40, 45, and 51 - bugs.put("24.0-b56", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord")); - bugs.put("24.45-b08", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord")); - bugs.put("24.51-b03", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord")); - G1GCCheck g1GcCheck = new G1GCCheck(); - bugs.put("25.0-b70", g1GcCheck); - bugs.put("25.11-b03", g1GcCheck); - bugs.put("25.20-b23", g1GcCheck); - bugs.put("25.25-b02", g1GcCheck); - bugs.put("25.31-b07", g1GcCheck); - - JVM_BROKEN_HOTSPOT_VERSIONS = Collections.unmodifiableMap(bugs); - } - - /** - * Checks that the current JVM is "ok". This means it doesn't have severe bugs that cause data corruption. - */ - static void check() { - if (Boolean.parseBoolean(System.getProperty(JVM_BYPASS))) { - Loggers.getLogger(JVMCheck.class).warn("bypassing jvm version check for version [{}], this can result in data corruption!", fullVersion()); - } else if ("Oracle Corporation".equals(Constants.JVM_VENDOR)) { - HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION); - if (bug != null && bug.check()) { - if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) { - Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get()); - } else { - throw new RuntimeException(bug.getErrorMessage()); - } - } - } else if ("IBM Corporation".equals(Constants.JVM_VENDOR)) { - // currently some old JVM versions from IBM will easily result in index corruption. - // 2.8+ seems ok for ES from testing. - float version = Float.POSITIVE_INFINITY; - try { - version = Float.parseFloat(Constants.JVM_VERSION); - } catch (NumberFormatException ignored) { - // this is just a simple best-effort to detect old runtimes, - // if we cannot parse it, we don't fail. 
- } - if (version < 2.8f) { - StringBuilder sb = new StringBuilder(); - sb.append("IBM J9 runtimes < 2.8 suffer from several bugs which can cause data corruption."); - sb.append(System.lineSeparator()); - sb.append("Your version: " + fullVersion()); - sb.append(System.lineSeparator()); - sb.append("Please upgrade the JVM to a recent IBM JDK"); - throw new RuntimeException(sb.toString()); - } - } - } - - /** - * Returns java + jvm version, looks like this: - * {@code Oracle Corporation 1.8.0_45 [Java HotSpot(TM) 64-Bit Server VM 25.45-b02]} - */ - static String fullVersion() { - StringBuilder sb = new StringBuilder(); - sb.append(Constants.JAVA_VENDOR); - sb.append(" "); - sb.append(Constants.JAVA_VERSION); - sb.append(" ["); - sb.append(Constants.JVM_NAME); - sb.append(" "); - sb.append(Constants.JVM_VERSION); - sb.append("]"); - return sb.toString(); - } -} diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index ebf9ab5f554..22ba936d903 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -272,20 +272,6 @@ public class JarHell { "class: " + clazz + System.lineSeparator() + "exists multiple times in jar: " + jarpath + " !!!!!!!!!"); } else { - if (clazz.startsWith("org.apache.logging.log4j.core.impl.ThrowableProxy")) { - /* - * deliberate to hack around a bug in Log4j - * cf. https://github.com/elastic/elasticsearch/issues/20304 - * cf. https://issues.apache.org/jira/browse/LOG4J2-1560 - */ - return; - } else if (clazz.startsWith("org.apache.logging.log4j.core.jmx.Server")) { - /* - * deliberate to hack around a bug in Log4j - * cf. https://issues.apache.org/jira/browse/LOG4J2-1506 - */ - return; - } throw new IllegalStateException("jar hell!" 
+ System.lineSeparator() + "class: " + clazz + System.lineSeparator() + "jar1: " + previous + System.lineSeparator() + diff --git a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 9e0d1a94119..14abc77513a 100644 --- a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -563,6 +563,11 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ DeletePipelineRequestBuilder prepareDeletePipeline(); + /** + * Deletes a stored ingest pipeline + */ + DeletePipelineRequestBuilder prepareDeletePipeline(String id); + /** * Returns a stored ingest pipeline */ diff --git a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java index e68b902e259..5fc2319284d 100644 --- a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -46,7 +46,7 @@ public class NodeClient extends AbstractClient { super(settings, threadPool); } - public void intialize(Map actions) { + public void initialize(Map actions) { this.actions = actions; } diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index c3816d8d37f..006040b8e16 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -348,9 +348,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Map; -/** - * - */ public abstract class AbstractClient extends AbstractComponent implements Client { private final ThreadPool threadPool; @@ -1099,6 +1096,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE); } + @Override + public DeletePipelineRequestBuilder prepareDeletePipeline(String id) { + return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE, id); + } + @Override public void getPipeline(GetPipelineRequest request, ActionListener listener) { execute(GetPipelineAction.INSTANCE, request, listener); diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 9f0675f308d..7ef7f400a53 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -151,7 +151,7 @@ public abstract class TransportClient extends AbstractClient { bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); final Transport transport = networkModule.getTransportSupplier().get(); final TransportService transportService = new TransportService(settings, transport, threadPool, - networkModule.getTransportInterceptor()); + networkModule.getTransportInterceptor(), null); modules.add((b -> { b.bind(BigArrays.class).toInstance(bigArrays); b.bind(PluginsService.class).toInstance(pluginsService); diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 18c2d15ec39..f36e9c60526 100644 --- 
a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -66,9 +66,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; -/** - * - */ public class TransportClientNodesService extends AbstractComponent implements Closeable { private final TimeValue nodesSamplerInterval; diff --git a/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java b/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java index 34833e9400a..31af25a494a 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java @@ -38,9 +38,6 @@ import java.util.Map; import static java.util.Collections.unmodifiableMap; -/** - * - */ public class TransportProxyClient { private final TransportClientNodesService nodesService; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 4e582cb32ca..9bc55054a1d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -19,12 +19,10 @@ package org.elasticsearch.cluster; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexTemplateFilter; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; @@ -55,18 +53,17 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAl import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.tasks.TaskResultsService; import java.util.Collection; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -109,21 +106,21 @@ public class ClusterModule extends AbstractModule { public static Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings, List clusterPlugins) { // collect deciders by class so that we can detect duplicates - Map deciders = new HashMap<>(); + Map deciders = new LinkedHashMap<>(); addAllocationDecider(deciders, new MaxRetryAllocationDecider(settings)); - addAllocationDecider(deciders, new SameShardAllocationDecider(settings)); - addAllocationDecider(deciders, new FilterAllocationDecider(settings, 
clusterSettings)); addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider(settings)); - addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider(settings)); addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings)); + addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings)); + addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new SameShardAllocationDecider(settings)); addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings)); clusterPlugins.stream() .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream()) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index e592b5092b7..6cf973955c7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -52,8 +52,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.local.LocalDiscovery; -import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.discovery.zen.PublishClusterStateAction; import java.io.IOException; import java.util.EnumSet; @@ -65,15 +64,13 @@ import java.util.Set; /** * Represents the current state of the cluster. *

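A few hunks back, ClusterModule switched its decider registry from HashMap to LinkedHashMap: duplicate detection by class is kept, but iteration now follows registration order, which is presumably why the addAllocationDecider calls were reordered in the same hunk. A small demonstration of the difference (decider names abbreviated for illustration):

-------------------------------------
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class DeciderOrderExample {
    public static void main(String[] args) {
        // LinkedHashMap preserves insertion order, so the deciders run in the
        // order they are registered; HashMap iteration order is arbitrary.
        Map<String, String> ordered = new LinkedHashMap<>();
        Map<String, String> unordered = new HashMap<>();
        for (String name : new String[] {"MaxRetry", "ReplicaAfterPrimaryActive", "RebalanceOnlyWhenActive"}) {
            ordered.put(name, name);
            unordered.put(name, name);
        }
        System.out.println(ordered.keySet());   // insertion order, deterministic
        System.out.println(unordered.keySet()); // hash order, arbitrary
    }
}
-------------------------------------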
- * The cluster state object is immutable with an - * exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable}, - * and cluster state {@link #status}, which is updated during cluster state publishing and applying - * processing. The cluster state can be updated only on the master node. All updates are performed by on a + * The cluster state object is immutable with an exception of the {@link RoutingNodes} structure, which is + * built on demand from the {@link RoutingTable}. + * The cluster state can be updated only on the master node. All updates are performed on a * single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes new version of the cluster state to all other nodes in the * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on - * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish} - * method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The + * the type of discovery. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The * publishing mechanism can be overridden by other discovery. *

    * The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state @@ -91,23 +88,6 @@ public class ClusterState implements ToXContent, Diffable { public static final ClusterState PROTO = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); - public static enum ClusterStateStatus { - UNKNOWN((byte) 0), - RECEIVED((byte) 1), - BEING_APPLIED((byte) 2), - APPLIED((byte) 3); - - private final byte id; - - ClusterStateStatus(byte id) { - this.id = id; - } - - public byte id() { - return this.id; - } - } - public interface Custom extends Diffable, ToXContent { String type(); @@ -168,8 +148,6 @@ public class ClusterState implements ToXContent, Diffable { // built on demand private volatile RoutingNodes routingNodes; - private volatile ClusterStateStatus status; - public ClusterState(long version, String stateUUID, ClusterState state) { this(state.clusterName, version, stateUUID, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); } @@ -183,19 +161,9 @@ public class ClusterState implements ToXContent, Diffable { this.nodes = nodes; this.blocks = blocks; this.customs = customs; - this.status = ClusterStateStatus.UNKNOWN; this.wasReadFromDiff = wasReadFromDiff; } - public ClusterStateStatus status() { - return status; - } - - public ClusterState status(ClusterStateStatus newStatus) { - this.status = newStatus; - return this; - } - public long version() { return this.version; } @@ -276,15 +244,16 @@ public class ClusterState implements ToXContent, Diffable { return routingNodes; } - public String prettyPrint() { + @Override + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); + final String TAB = " "; for (IndexMetaData indexMetaData : metaData) { - final String TAB = " "; sb.append(TAB).append(indexMetaData.getIndex()); sb.append(": v[").append(indexMetaData.getVersion()).append("]\n"); for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) { @@ -293,24 +262,19 @@ public class ClusterState implements ToXContent, Diffable { sb.append("isa_ids ").append(indexMetaData.inSyncAllocationIds(shard)).append("\n"); } } - sb.append(blocks().prettyPrint()); - sb.append(nodes().prettyPrint()); - sb.append(routingTable().prettyPrint()); - sb.append(getRoutingNodes().prettyPrint()); - return sb.toString(); - } - - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; + sb.append(blocks()); + sb.append(nodes()); + sb.append(routingTable()); + sb.append(getRoutingNodes()); + if (customs.isEmpty() == false) { + sb.append("customs:\n"); + for (ObjectObjectCursor cursor : customs) { + final String type = cursor.key; + final Custom custom = cursor.value; + sb.append(TAB).append(type).append(": ").append(custom); + } } + return sb.toString(); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java 
index e18ec5543d9..17dec4cf504 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -22,6 +22,8 @@ package org.elasticsearch.cluster; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.ClusterStateStatus; +import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -50,7 +52,7 @@ public class ClusterStateObserver { volatile TimeValue timeOutValue; - final AtomicReference lastObservedState; + final AtomicReference lastObservedState; final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); // observingContext is not null when waiting on cluster state changes final AtomicReference observingContext = new AtomicReference<>(null); @@ -69,7 +71,7 @@ public class ClusterStateObserver { */ public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) { this.clusterService = clusterService; - this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state())); + this.lastObservedState = new AtomicReference<>(clusterService.clusterServiceState()); this.timeOutValue = timeout; if (timeOutValue != null) { this.startTimeNS = System.nanoTime(); @@ -78,11 +80,11 @@ public class ClusterStateObserver { this.contextHolder = contextHolder; } - /** last cluster state observer by this observer. Note that this may not be the current one */ - public ClusterState observedState() { - ObservedState state = lastObservedState.get(); + /** last cluster state and status observed by this observer. Note that this may not be the current one */ + public ClusterServiceState observedState() { + ClusterServiceState state = lastObservedState.get(); assert state != null; - return state.clusterState; + return state; } /** indicates whether this observer has timedout */ @@ -126,7 +128,7 @@ public class ClusterStateObserver { logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry timedOut = true; - lastObservedState.set(new ObservedState(clusterService.state())); + lastObservedState.set(clusterService.clusterServiceState()); listener.onTimeout(timeOutValue); return; } @@ -141,13 +143,13 @@ public class ClusterStateObserver { } // sample a new state - ObservedState newState = new ObservedState(clusterService.state()); - ObservedState lastState = lastObservedState.get(); - if (changePredicate.apply(lastState.clusterState, lastState.status, newState.clusterState, newState.status)) { + ClusterServiceState newState = clusterService.clusterServiceState(); + ClusterServiceState lastState = lastObservedState.get(); + if (changePredicate.apply(lastState, newState)) { // good enough, let's go. logger.trace("observer: sampled state accepted by predicate ({})", newState); lastObservedState.set(newState); - listener.onNewClusterState(newState.clusterState); + listener.onNewClusterState(newState.getClusterState()); } else { logger.trace("observer: sampled state rejected by predicate ({}). 
adding listener to ClusterService", newState); ObservingContext context = new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), changePredicate); @@ -161,11 +163,11 @@ public class ClusterStateObserver { /** * reset this observer to the give cluster state. Any pending waits will be canceled. */ - public void reset(ClusterState toState) { + public void reset(ClusterServiceState state) { if (observingContext.getAndSet(null) != null) { clusterService.remove(clusterStateListener); } - lastObservedState.set(new ObservedState(toState)); + lastObservedState.set(state); } class ObserverClusterStateListener implements TimeoutClusterStateListener { @@ -180,10 +182,10 @@ public class ClusterStateObserver { if (context.changePredicate.apply(event)) { if (observingContext.compareAndSet(context, null)) { clusterService.remove(this); - ObservedState state = new ObservedState(event.state()); + ClusterServiceState state = new ClusterServiceState(event.state(), ClusterStateStatus.APPLIED); logger.trace("observer: accepting cluster state change ({})", state); lastObservedState.set(state); - context.listener.onNewClusterState(state.clusterState); + context.listener.onNewClusterState(state.getClusterState()); } else { logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", event.state().version()); } @@ -199,15 +201,15 @@ public class ClusterStateObserver { // No need to remove listener as it is the responsibility of the thread that set observingContext to null return; } - ObservedState newState = new ObservedState(clusterService.state()); - ObservedState lastState = lastObservedState.get(); - if (context.changePredicate.apply(lastState.clusterState, lastState.status, newState.clusterState, newState.status)) { + ClusterServiceState newState = clusterService.clusterServiceState(); + ClusterServiceState lastState = lastObservedState.get(); + if (context.changePredicate.apply(lastState, newState)) { // double check we're still listening if (observingContext.compareAndSet(context, null)) { logger.trace("observer: post adding listener: accepting current cluster state ({})", newState); clusterService.remove(this); lastObservedState.set(newState); - context.listener.onNewClusterState(newState.clusterState); + context.listener.onNewClusterState(newState.getClusterState()); } else { logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", newState); } @@ -235,7 +237,7 @@ public class ClusterStateObserver { long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS); logger.trace("observer: timeout notification from cluster service. 
timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry - lastObservedState.set(new ObservedState(clusterService.state())); + lastObservedState.set(clusterService.clusterServiceState()); timedOut = true; context.listener.onTimeout(timeOutValue); } @@ -260,10 +262,8 @@ public class ClusterStateObserver { * * @return true if newState should be accepted */ - boolean apply(ClusterState previousState, - ClusterState.ClusterStateStatus previousStatus, - ClusterState newState, - ClusterState.ClusterStateStatus newStatus); + boolean apply(ClusterServiceState previousState, + ClusterServiceState newState); /** * called to see whether a cluster change should be accepted @@ -277,22 +277,25 @@ public class ClusterStateObserver { public abstract static class ValidationPredicate implements ChangePredicate { @Override - public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) { - return (previousState != newState || previousStatus != newStatus) && validate(newState); + public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) { + return (previousState.getClusterState() != newState.getClusterState() || + previousState.getClusterStateStatus() != newState.getClusterStateStatus()) && + validate(newState); } - protected abstract boolean validate(ClusterState newState); + protected abstract boolean validate(ClusterServiceState newState); @Override public boolean apply(ClusterChangedEvent changedEvent) { - return changedEvent.previousState().version() != changedEvent.state().version() && validate(changedEvent.state()); + return changedEvent.previousState().version() != changedEvent.state().version() && + validate(new ClusterServiceState(changedEvent.state(), ClusterStateStatus.APPLIED)); } } public abstract static class EventPredicate implements ChangePredicate { @Override - public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) { - return previousState != newState || previousStatus != newStatus; + public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) { + return previousState.getClusterState() != newState.getClusterState() || previousState.getClusterStateStatus() != newState.getClusterStateStatus(); } } @@ -307,21 +310,6 @@ public class ClusterStateObserver { } } - static class ObservedState { - public final ClusterState clusterState; - public final ClusterState.ClusterStateStatus status; - - public ObservedState(ClusterState clusterState) { - this.clusterState = clusterState; - this.status = clusterState.status(); - } - - @Override - public String toString() { - return "version [" + clusterState.version() + "], status [" + status + "]"; - } - } - private static final class ContextPreservingListener implements Listener { private final Listener delegate; private final ThreadContext.StoredContext tempContext; diff --git a/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java b/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java index afb557e5bbc..0ee5c891282 100644 --- a/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java +++ b/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java @@ -19,18 +19,19 @@ package org.elasticsearch.cluster; +import 
org.elasticsearch.cluster.service.ClusterServiceState; + public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate { INSTANCE; @Override public boolean apply( - ClusterState previousState, - ClusterState.ClusterStateStatus previousStatus, - ClusterState newState, - ClusterState.ClusterStateStatus newStatus) { + ClusterServiceState previousState, + ClusterServiceState newState) { // checking if the masterNodeId changed is insufficient as the // same master node might get re-elected after a disruption - return newState.nodes().getMasterNodeId() != null && newState != previousState; + return newState.getClusterState().nodes().getMasterNodeId() != null && + newState.getClusterState() != previousState.getClusterState(); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 99f161b9da5..94333c10dde 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -31,9 +31,12 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.discovery.zen.MasterFaultDetection; +import org.elasticsearch.discovery.zen.NodesFaultDetection; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ScheduledFuture; @@ -45,8 +48,8 @@ import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnect when they are * removed. Also, it periodically checks that all connections are still open and if needed restores them. * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond - * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection - * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}. + * to pings. This is done by {@link NodesFaultDetection}. Master fault detection + * is done by {@link MasterFaultDetection}. 
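The ClusterStateObserver hunks above replace its private ObservedState with the shared ClusterServiceState snapshot, and ChangePredicate now compares two such snapshots directly. A compact sketch of that sampling pattern, with Snapshot and Status as illustrative stand-ins:

-------------------------------------
import java.util.concurrent.atomic.AtomicReference;

public class ObserverSketch {
    enum Status { RECEIVED, BEING_APPLIED, APPLIED }

    // Immutable pairing of a cluster state with its application status, in the
    // spirit of ClusterServiceState; the long stands in for the state object.
    static final class Snapshot {
        final long state;
        final Status status;
        Snapshot(long state, Status status) { this.state = state; this.status = status; }
    }

    // The simplified ChangePredicate: fire when the state or its status moved on
    // (the real code compares state object identity rather than a number).
    static boolean changed(Snapshot previous, Snapshot current) {
        return previous.state != current.state || previous.status != current.status;
    }

    public static void main(String[] args) {
        AtomicReference<Snapshot> lastObserved = new AtomicReference<>(new Snapshot(1, Status.RECEIVED));
        Snapshot sampled = new Snapshot(1, Status.APPLIED);
        if (changed(lastObserved.get(), sampled)) {
            lastObserved.set(sampled); // accept the sample, as the observer does
        }
        System.out.println(lastObserved.get().status); // APPLIED
    }
}
-------------------------------------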
*/ public class NodeConnectionsService extends AbstractLifecycleComponent { @@ -73,10 +76,10 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); } - public void connectToAddedNodes(ClusterChangedEvent event) { + public void connectToNodes(List<DiscoveryNode> addedNodes) { // TODO: do this in parallel (and wait) - for (final DiscoveryNode node : event.nodesDelta().addedNodes()) { + for (final DiscoveryNode node : addedNodes) { try (Releasable ignored = nodeLocks.acquire(node)) { Integer current = nodes.put(node, 0); assert current == null : "node " + node + " was added in event but already in internal nodes"; @@ -85,8 +88,8 @@ } } - public void disconnectFromRemovedNodes(ClusterChangedEvent event) { - for (final DiscoveryNode node : event.nodesDelta().removedNodes()) { + public void disconnectFromNodes(List<DiscoveryNode> removedNodes) { + for (final DiscoveryNode node : removedNodes) { try (Releasable ignored = nodeLocks.acquire(node)) { Integer current = nodes.remove(node); assert current != null : "node " + node + " was removed in event but not in internal nodes"; diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 70880373530..6df5f85987d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -210,12 +210,9 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus public static class ShardSnapshotStatus { - private State state; - private String nodeId; - private String reason; - - private ShardSnapshotStatus() { - } + private final State state; + private final String nodeId; + private final String reason; public ShardSnapshotStatus(String nodeId) { this(nodeId, State.INIT); @@ -231,6 +228,12 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus this.reason = reason; } + public ShardSnapshotStatus(StreamInput in) throws IOException { + nodeId = in.readOptionalString(); + state = State.fromValue(in.readByte()); + reason = in.readOptionalString(); + } + public State state() { return state; } @@ -243,18 +246,6 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return reason; } - public static ShardSnapshotStatus readShardSnapshotStatus(StreamInput in) throws IOException { - ShardSnapshotStatus shardSnapshotStatus = new ShardSnapshotStatus(); - shardSnapshotStatus.readFrom(in); - return shardSnapshotStatus; - } - - public void readFrom(StreamInput in) throws IOException { - nodeId = in.readOptionalString(); - state = State.fromValue(in.readByte()); - reason = in.readOptionalString(); - } - public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(nodeId); out.writeByte(state.value); @@ -282,6 +273,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus result = 31 * result + (reason != null ?
reason.hashCode() : 0); return result; } + + @Override + public String toString() { + return "ShardSnapshotStatus[state=" + state + ", nodeId=" + nodeId + ", reason=" + reason + "]"; + } } public enum State { diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index b1bf01018c9..3fc4f436154 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -41,9 +41,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -/** - * - */ public class NodeMappingRefreshAction extends AbstractComponent { public static final String ACTION_NAME = "internal:cluster/node/mapping/refresh"; diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index ce6473ecb42..d7964f0c429 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -92,7 +92,7 @@ public class ShardStateAction extends AbstractComponent { } private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) { - DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode(); + DiscoveryNode masterNode = observer.observedState().getClusterState().nodes().getMasterNode(); if (masterNode == null) { logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry); waitForNewMasterAndRetry(actionName, observer, shardEntry, listener); @@ -164,7 +164,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state.prettyPrint(), shardEntry); + logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state, shardEntry); } sendShardAction(actionName, observer, shardEntry, listener); } diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index 5a7f8f7c0a9..253206222b4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -31,9 +31,6 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.Locale; -/** - * - */ public class ClusterBlock implements Streamable, ToXContent { private int id; diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java index 94fad64154d..9ebb2286895 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java @@ -30,9 +30,6 @@ import java.util.Set; import static java.util.Collections.unmodifiableSet; -/** - * - */ public class ClusterBlockException extends ElasticsearchException { private final Set blocks; diff --git 
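
The ShardSnapshotStatus change above is one instance of a wider cleanup in this patch: mutable fields plus a readFrom(StreamInput) method give way to final fields and a deserializing constructor. A hedged sketch of the general shape, where the Example class is hypothetical but StreamInput/StreamOutput and the readOptionalString/writeOptionalString calls are the same ones used above:

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    public class Example {
        private final String nodeId; // final: no half-initialized instances

        public Example(String nodeId) {
            this.nodeId = nodeId;
        }

        // deserialization happens in a constructor instead of a mutating readFrom()
        public Example(StreamInput in) throws IOException {
            nodeId = in.readOptionalString();
        }

        public void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalString(nodeId); // write order mirrors the reading constructor
        }
    }
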
a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index 45ff1d3707b..9d39d410d03 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -22,9 +22,6 @@ package org.elasticsearch.cluster.block; import java.util.EnumSet; -/** - * - */ public enum ClusterBlockLevel { READ(0), WRITE(1), diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index e6f04c8702c..12e6ee0f7ec 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -199,7 +199,8 @@ public class ClusterBlocks extends AbstractDiffable { return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet()))); } - public String prettyPrint() { + @Override + public String toString() { if (global.isEmpty() && indices().isEmpty()) { return ""; } diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java index a261d28f537..8a255fb1ce8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -/** - * - */ public enum ClusterHealthStatus implements Writeable { GREEN((byte) 0), YELLOW((byte) 1), diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 7e4d1917485..e62a3935ad5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -37,9 +37,6 @@ import java.util.Set; import static java.util.Collections.emptySet; -/** - * - */ public class AliasMetaData extends AbstractDiffable { public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index cb46b22fe7e..9a2f3dc5526 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -99,10 +99,11 @@ public class AliasValidator extends AbstractComponent { } } - private void validateAliasStandalone(String alias, String indexRouting) { + void validateAliasStandalone(String alias, String indexRouting) { if (!Strings.hasText(alias)) { throw new IllegalArgumentException("alias name is required"); } + MetaDataCreateIndexService.validateIndexOrAliasName(alias, InvalidAliasNameException::new); if (indexRouting != null && indexRouting.indexOf(',') != -1) { throw new IllegalArgumentException("alias [" + alias + "] has several index routing values associated with it"); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index c5ccd3bc6ff..25836d54a1b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -75,9 +75,6 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -/** - * - */ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent { public interface Custom extends Diffable<Custom>, ToXContent { @@ -209,6 +206,11 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string"; public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible"; public static final String SETTING_CREATION_DATE = "index.creation_date"; + /** + * The user provided name for an index. This is the plain string provided by the user when the index was created. + * It might still contain date math expressions etc. (added in 5.0) + */ + public static final String SETTING_INDEX_PROVIDED_NAME = "index.provided_name"; public static final String SETTING_PRIORITY = "index.priority"; public static final Setting<Integer> INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 6ecf7483d80..377409dd86b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -48,7 +48,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; import java.util.stream.Collectors; public class IndexNameExpressionResolver extends AbstractComponent { @@ -580,6 +579,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { private Set<String> innerResolve(Context context, List<String> expressions, IndicesOptions options, MetaData metaData) { Set<String> result = null; + boolean wildcardSeen = false; for (int i = 0; i < expressions.size(); i++) { String expression = expressions.get(i); if (aliasOrIndexExists(metaData, expression)) { @@ -599,13 +599,14 @@ } expression = expression.substring(1); } else if (expression.charAt(0) == '-') { - // if its the first, fill it with all the indices... - if (i == 0) { - List<String> concreteIndices = resolveEmptyOrTrivialWildcard(options, metaData, false); - result = new HashSet<>(concreteIndices); + // a negation that follows a wildcard excludes the matching names from the result; + // a negation with no previously seen wildcard is kept verbatim as an index name + if (wildcardSeen) { + add = false; + expression = expression.substring(1); + } else { + add = true; } - add = false; - expression = expression.substring(1); } if (result == null) { // add all the previous ones...
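
To make the new negation rule concrete: given indices test1 and test2, the expression list ["test*", "-test2"] resolves to {test1}, while a bare ["-test2"] is now taken verbatim as the index name "-test2" rather than meaning "everything except test2". A simplified, self-contained re-implementation of just this rule (illustration only, not the patch's code; a lone "*" stands in for full wildcard matching):

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    class NegationRuleSketch {
        static Set<String> resolve(List<String> expressions, Set<String> allIndices) {
            Set<String> result = new LinkedHashSet<>();
            boolean wildcardSeen = false;
            for (String expression : expressions) {
                if (expression.startsWith("-") && wildcardSeen) {
                    result.remove(expression.substring(1)); // exclusion only after a wildcard
                } else if (expression.equals("*")) {
                    result.addAll(allIndices);
                    wildcardSeen = true;
                } else {
                    result.add(expression); // includes a verbatim "-name"
                }
            }
            return result;
        }
        // resolve(asList("*", "-test2"), {test1, test2}) -> [test1]
        // resolve(asList("-test2"), {test1, test2})      -> [-test2]
    }
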
@@ -635,6 +636,10 @@ public class IndexNameExpressionResolver extends AbstractComponent { if (!noIndicesAllowedOrMatches(options, matches)) { throw infe(expression); } + + if (Regex.isSimpleMatchPattern(expression)) { + wildcardSeen = true; + } } return result; } @@ -848,12 +853,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { DateTimeFormatter parser = dateFormatter.withZone(timeZone); FormatDateTimeFormatter formatter = new FormatDateTimeFormatter(dateFormatterPattern, parser, Locale.ROOT); DateMathParser dateMathParser = new DateMathParser(formatter); - long millis = dateMathParser.parse(mathExpression, new Callable() { - @Override - public Long call() throws Exception { - return context.getStartTime(); - } - }, false, timeZone); + long millis = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone); String time = formatter.printer().print(millis); beforePlaceHolderSb.append(time); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 5bf08dbce90..e719bac6188 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -42,9 +42,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -/** - * - */ public class IndexTemplateMetaData extends AbstractDiffable { public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); @@ -222,7 +219,7 @@ public class IndexTemplateMetaData extends AbstractDiffable, Diffable, Fr SNAPSHOT } + /** + * Indicates that this custom metadata will be returned as part of an API call but will not be persisted + */ public static EnumSet API_ONLY = EnumSet.of(XContentContext.API); + + /** + * Indicates that this custom metadata will be returned as part of an API call and will be persisted between + * node restarts, but will not be a part of a snapshot global state + */ public static EnumSet API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY); + + /** + * Indicates that this custom metadata will be returned as part of an API call and stored as a part of + * a snapshot global state, but will not be persisted between node restarts + */ public static EnumSet API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT); + /** + * Indicates that this custom metadata will be returned as part of an API call, stored as a part of + * a snapshot global state, and will be persisted between node restarts + */ + public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); + public interface Custom extends Diffable, ToXContent { String type(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index c8ce8d74b70..ab317abe716 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; @@ -63,7 +64,6 @@ 
import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; @@ -88,6 +88,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; import java.util.function.Predicate; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; @@ -109,7 +110,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { private final AllocationService allocationService; private final AliasValidator aliasValidator; private final Environment env; - private final NodeServicesProvider nodeServicesProvider; private final IndexScopedSettings indexScopedSettings; private final ActiveShardsObserver activeShardsObserver; @@ -117,37 +117,48 @@ public class MetaDataCreateIndexService extends AbstractComponent { public MetaDataCreateIndexService(Settings settings, ClusterService clusterService, IndicesService indicesService, AllocationService allocationService, AliasValidator aliasValidator, Environment env, - NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings, - ThreadPool threadPool) { + IndexScopedSettings indexScopedSettings, ThreadPool threadPool) { super(settings); this.clusterService = clusterService; this.indicesService = indicesService; this.allocationService = allocationService; this.aliasValidator = aliasValidator; this.env = env; - this.nodeServicesProvider = nodeServicesProvider; this.indexScopedSettings = indexScopedSettings; this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); } + /** + * Validate the name for an index against some static rules and a cluster state. + */ public static void validateIndexName(String index, ClusterState state) { + validateIndexOrAliasName(index, InvalidIndexNameException::new); + if (!index.toLowerCase(Locale.ROOT).equals(index)) { + throw new InvalidIndexNameException(index, "must be lowercase"); + } if (state.routingTable().hasIndex(index)) { throw new IndexAlreadyExistsException(state.routingTable().index(index).getIndex()); } if (state.metaData().hasIndex(index)) { throw new IndexAlreadyExistsException(state.metaData().index(index).getIndex()); } + if (state.metaData().hasAlias(index)) { + throw new InvalidIndexNameException(index, "already exists as alias"); + } + } + + /** + * Validate the name for an index or alias against some static rules. 
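
A note on the helper declared just below: the BiFunction parameter's type arguments were lost in this rendering; it accepts the offending name plus a message and constructs the RuntimeException to throw, so the identical checks can fail with an index-specific or alias-specific exception. The two call shapes, with hypothetical names being validated:

    // index names fail with InvalidIndexNameException:
    MetaDataCreateIndexService.validateIndexOrAliasName("logs-2016.10.31", InvalidIndexNameException::new);
    // alias validation (see the AliasValidator change earlier) reuses the same rules:
    MetaDataCreateIndexService.validateIndexOrAliasName("logs-current", InvalidAliasNameException::new);
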
+ */ + public static void validateIndexOrAliasName(String index, BiFunction exceptionCtor) { if (!Strings.validFileName(index)) { - throw new InvalidIndexNameException(index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); + throw exceptionCtor.apply(index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); } if (index.contains("#")) { - throw new InvalidIndexNameException(index, "must not contain '#'"); + throw exceptionCtor.apply(index, "must not contain '#'"); } if (index.charAt(0) == '_' || index.charAt(0) == '-' || index.charAt(0) == '+') { - throw new InvalidIndexNameException(index, "must not start with '_', '-', or '+'"); - } - if (!index.toLowerCase(Locale.ROOT).equals(index)) { - throw new InvalidIndexNameException(index, "must be lowercase"); + throw exceptionCtor.apply(index, "must not start with '_', '-', or '+'"); } int byteCount = 0; try { @@ -157,15 +168,10 @@ public class MetaDataCreateIndexService extends AbstractComponent { throw new ElasticsearchException("Unable to determine length of index name", e); } if (byteCount > MAX_INDEX_NAME_BYTES) { - throw new InvalidIndexNameException(index, - "index name is too long, (" + byteCount + - " > " + MAX_INDEX_NAME_BYTES + ")"); - } - if (state.metaData().hasAlias(index)) { - throw new InvalidIndexNameException(index, "already exists as alias"); + throw exceptionCtor.apply(index, "index name is too long, (" + byteCount + " > " + MAX_INDEX_NAME_BYTES + ")"); } if (index.equals(".") || index.equals("..")) { - throw new InvalidIndexNameException(index, "must not be '.' or '..'"); + throw exceptionCtor.apply(index, "must not be '.' or '..'"); } } @@ -318,7 +324,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) { indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis()); } - + indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName()); indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); final Index shrinkFromIndex = request.shrinkFrom(); int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());; @@ -344,8 +350,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { (tmpImd.getNumberOfReplicas() + 1) + "]"); } // create the index here (on the master) to validate it can be created, as well as adding the mapping - final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, - Collections.emptyList(), shardId -> {}); + final IndexService indexService = indicesService.createIndex(tmpImd, Collections.emptyList(), shardId -> {}); createdIndex = indexService.index(); // now add the mappings MapperService mapperService = indexService.mapperService(); @@ -356,7 +361,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { throw mpe; } - final QueryShardContext queryShardContext = indexService.newQueryShardContext(); + // the context is only used for validation so it's fine to pass fake values for the shard id and the current + // timestamp + final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L); for (Alias alias : request.aliases()) { if (Strings.hasLength(alias.filter())) { aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java 
b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 22553dd9929..7dbc06cba0f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -23,20 +23,23 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.delete.DeleteIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; +import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.SnapshotsService; import java.util.Arrays; -import java.util.Collection; import java.util.Set; import static java.util.stream.Collectors.toSet; @@ -63,7 +66,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { throw new IllegalArgumentException("Index name is required"); } - clusterService.submitStateUpdateTask("delete-index " + request.indices(), + clusterService.submitStateUpdateTask("delete-index " + Arrays.toString(request.indices()), new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override @@ -73,7 +76,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { - return deleteIndices(currentState, Arrays.asList(request.indices())); + return deleteIndices(currentState, Sets.newHashSet(request.indices())); } }); } @@ -81,7 +84,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { /** * Delete some indices from the cluster state. 
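
One small fix above deserves a call-out: request.indices() is a String[], and concatenating an array onto a String prints the array's type and identity hash rather than its contents, so the submitted task source used to read like "delete-index [Ljava.lang.String;@1a2b3c". A plain-Java illustration:

    import java.util.Arrays;

    String[] indices = {"logs", "metrics"};
    String bad  = "delete-index " + indices;                  // delete-index [Ljava.lang.String;@...
    String good = "delete-index " + Arrays.toString(indices); // delete-index [logs, metrics]
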
*/ - public ClusterState deleteIndices(ClusterState currentState, Collection<Index> indices) { + public ClusterState deleteIndices(ClusterState currentState, Set<Index> indices) { final MetaData meta = currentState.metaData(); final Set<IndexMetaData> metaDatas = indices.stream().map(i -> meta.getIndexSafe(i)).collect(toSet()); // Check if index deletion conflicts with any running snapshots @@ -107,11 +110,25 @@ MetaData newMetaData = metaDataBuilder.build(); ClusterBlocks blocks = clusterBlocksBuilder.build(); + + // update snapshot restore entries + ImmutableOpenMap<String, ClusterState.Custom> customs = currentState.getCustoms(); + final RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); + if (restoreInProgress != null) { + RestoreInProgress updatedRestoreInProgress = RestoreService.updateRestoreStateWithDeletedIndices(restoreInProgress, indices); + if (updatedRestoreInProgress != restoreInProgress) { + ImmutableOpenMap.Builder<String, ClusterState.Custom> builder = ImmutableOpenMap.builder(customs); + builder.put(RestoreInProgress.TYPE, updatedRestoreInProgress); + customs = builder.build(); + } + } + return allocationService.reroute( ClusterState.builder(currentState) .routingTable(routingTableBuilder.build()) .metaData(newMetaData) .blocks(blocks) + .customs(customs) .build(), "deleted indices [" + indices + "]"); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 0bf338d251b..0c0d3f576a5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; @@ -36,7 +37,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndicesService; @@ -62,8 +62,6 @@ public class MetaDataIndexAliasesService extends AbstractComponent { private final AliasValidator aliasValidator; - private final NodeServicesProvider nodeServicesProvider; - private final MetaDataDeleteIndexService deleteIndexService; @Inject @@ -72,13 +70,11 @@ public class MetaDataIndexAliasesService extends AbstractComponent { ClusterService clusterService, IndicesService indicesService, AliasValidator aliasValidator, - NodeServicesProvider nodeServicesProvider, MetaDataDeleteIndexService deleteIndexService) { super(settings); this.clusterService = clusterService; this.indicesService = indicesService; this.aliasValidator = aliasValidator; - this.nodeServicesProvider = nodeServicesProvider; this.deleteIndexService = deleteIndexService; } @@ -144,7 +140,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { if (indexService == null) { // temporarily create the index and add mappings so we can parse the filter try { - indexService =
indicesService.createIndex(index, emptyList(), shardId -> {}); } catch (IOException e) { throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e); } @@ -157,7 +153,9 @@ public class MetaDataIndexAliasesService extends AbstractComponent { } indices.put(action.getIndex(), indexService); } - aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext()); + // the context is only used for validation so it's fine to pass fake values for the shard id and the current + // timestamp + aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext(0, null, () -> 0L)); } }; changed |= action.apply(newAliasValidator, metadata, index); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index fd7c34dbe6c..689eff0da61 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.RestoreService; @@ -61,15 +60,13 @@ public class MetaDataIndexStateService extends AbstractComponent { private final AllocationService allocationService; private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; - private final NodeServicesProvider nodeServiceProvider; private final IndicesService indicesService; @Inject public MetaDataIndexStateService(Settings settings, ClusterService clusterService, AllocationService allocationService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, - NodeServicesProvider nodeServicesProvider, IndicesService indicesService) { + IndicesService indicesService) { super(settings); - this.nodeServiceProvider = nodeServicesProvider; this.indicesService = indicesService; this.clusterService = clusterService; this.allocationService = allocationService; @@ -170,7 +167,7 @@ public class MetaDataIndexStateService extends AbstractComponent { // We need to check that this index can be upgraded to the current version indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); try { - indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData, indexMetaData); + indicesService.verifyIndexMetadata(indexMetaData, indexMetaData); } catch (Exception e) { throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 0499b42f7d7..104cba9e456 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -37,7 
+38,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndexTemplateAlreadyExistsException; @@ -63,21 +63,18 @@ public class MetaDataIndexTemplateService extends AbstractComponent { private final AliasValidator aliasValidator; private final IndicesService indicesService; private final MetaDataCreateIndexService metaDataCreateIndexService; - private final NodeServicesProvider nodeServicesProvider; private final IndexScopedSettings indexScopedSettings; @Inject public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, - NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) { super(settings); this.clusterService = clusterService; this.aliasValidator = aliasValidator; this.indicesService = indicesService; this.metaDataCreateIndexService = metaDataCreateIndexService; - this.nodeServicesProvider = nodeServicesProvider; this.indexScopedSettings = indexScopedSettings; } @@ -167,7 +164,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { throw new IndexTemplateAlreadyExistsException(request.name); } - validateAndAddTemplate(request, templateBuilder, indicesService, nodeServicesProvider); + validateAndAddTemplate(request, templateBuilder, indicesService); for (Alias alias : request.aliases) { AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) @@ -191,8 +188,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent { }); } - private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, IndicesService indicesService, - NodeServicesProvider nodeServicesProvider) throws Exception { + private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, + IndicesService indicesService) throws Exception { Index createdIndex = null; final String temporaryIndexName = UUIDs.randomBase64UUID(); try { @@ -207,7 +204,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { .build(); final IndexMetaData tmpIndexMetadata = IndexMetaData.builder(temporaryIndexName).settings(dummySettings).build(); - IndexService dummyIndexService = indicesService.createIndex(nodeServicesProvider, tmpIndexMetadata, Collections.emptyList(), shardId -> {}); + IndexService dummyIndexService = indicesService.createIndex(tmpIndexMetadata, Collections.emptyList(), shardId -> {}); createdIndex = dummyIndexService.index(); templateBuilder.order(request.order); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 13687ed362f..b261b8850c6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import 
org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.Version; import org.elasticsearch.common.component.AbstractComponent; @@ -26,6 +28,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; @@ -115,7 +118,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { // been started yet. However, we don't really need real analyzers at this stage - so we can fake it IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); - final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", new Analyzer() { + final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", AnalyzerScope.INDEX, new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { throw new UnsupportedOperationException("shouldn't be here"); @@ -128,7 +131,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { @Override public NamedAnalyzer get(Object key) { assert key instanceof String : "key must be a string but was: " + key.getClass(); - return new NamedAnalyzer((String)key, fakeDefault.analyzer()); + return new NamedAnalyzer((String)key, AnalyzerScope.INDEX, fakeDefault.analyzer()); } @Override @@ -160,7 +163,10 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) { final Settings settings = indexMetaData.getSettings(); - final Settings upgrade = indexScopedSettings.archiveUnknownOrBrokenSettings(settings); + final Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings( + settings, + e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), + (e, ex) -> logger.warn((Supplier) () -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); if (upgrade != settings) { return IndexMetaData.builder(indexMetaData).settings(upgrade).build(); } else { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 694f52f3da4..c6f5ddc1fa6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; @@ -41,7 +42,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import 
org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndicesService; @@ -65,15 +65,13 @@ public class MetaDataMappingService extends AbstractComponent { final ClusterStateTaskExecutor refreshExecutor = new RefreshTaskExecutor(); final ClusterStateTaskExecutor putMappingExecutor = new PutMappingExecutor(); - private final NodeServicesProvider nodeServicesProvider; @Inject - public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { + public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService) { super(settings); this.clusterService = clusterService; this.indicesService = indicesService; - this.nodeServicesProvider = nodeServicesProvider; } static class RefreshTask { @@ -146,7 +144,7 @@ public class MetaDataMappingService extends AbstractComponent { IndexService indexService = indicesService.indexService(indexMetaData.getIndex()); if (indexService == null) { // we need to create the index here, and add the current mapping to it, so we can merge - indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList(), shardId -> {}); + indexService = indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {}); removeIndex = true; for (ObjectCursor metaData : indexMetaData.getMappings().values()) { // don't apply the default mapping, it has been applied when the mapping was created @@ -229,8 +227,8 @@ public class MetaDataMappingService extends AbstractComponent { // if the index does not exists we create it once, add all types to the mapper service and // close it later once we are done with mapping update indicesToClose.add(indexMetaData.getIndex()); - IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, - Collections.emptyList(), shardId -> {}); + IndexService indexService = + indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {}); // add mappings for all types, we need them for cross-type validation for (ObjectCursor mapping : indexMetaData.getMappings().values()) { indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 41dd64cf86c..2b937328106 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.indices.IndicesService; import java.io.IOException; @@ -66,18 +65,16 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements private final IndexScopedSettings indexScopedSettings; private final IndicesService indicesService; - private final NodeServicesProvider nodeServiceProvider; @Inject public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, - IndexScopedSettings indexScopedSettings, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { + 
IndexScopedSettings indexScopedSettings, IndicesService indicesService) { super(settings); this.clusterService = clusterService; this.clusterService.add(this); this.allocationService = allocationService; this.indexScopedSettings = indexScopedSettings; this.indicesService = indicesService; - this.nodeServiceProvider = nodeServicesProvider; } @Override @@ -227,6 +224,9 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements int updatedNumberOfReplicas = openSettings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, -1); if (updatedNumberOfReplicas != -1 && preserveExisting == false) { + // we do *not* update the in sync allocation ids as they will be removed upon the first index + // operation which make these copies stale + // TODO: update the list once the data is deleted by the node? routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); metaDataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); @@ -275,12 +275,12 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements for (Index index : openIndices) { final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index); final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index); - indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData); + indicesService.verifyIndexMetadata(currentMetaData, updatedMetaData); } for (Index index : closeIndices) { final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index); final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index); - indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData); + indicesService.verifyIndexMetadata(currentMetaData, updatedMetaData); } } catch (IOException ex) { throw ExceptionsHelper.convertToElastic(ex); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java b/core/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java index f161f8c55ee..c648c3e633d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/ProcessClusterEventTimeoutException.java @@ -26,8 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - */ public class ProcessClusterEventTimeoutException extends ElasticsearchException { public ProcessClusterEventTimeoutException(TimeValue timeValue, String source) { diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 55be77d201f..e90ada51022 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.transport.TransportAddressSerializers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.node.Node; @@ -40,7 +39,6 @@ import java.util.Map; 
import java.util.Set; import java.util.function.Predicate; -import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream; /** * A discovery node represents a node that is part of the cluster. @@ -137,7 +135,7 @@ public class DiscoveryNode implements Writeable, ToXContent { */ public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map attributes, Set roles, Version version) { - this(nodeName, nodeId, UUIDs.randomBase64UUID(), address.getHost(), address.getAddress(), address, attributes, roles, version); + this(nodeName, nodeId, UUIDs.randomBase64UUID(), address.getAddress(), address.getAddress(), address, attributes, roles, version); } /** @@ -217,7 +215,7 @@ public class DiscoveryNode implements Writeable, ToXContent { this.ephemeralId = in.readString().intern(); this.hostName = in.readString().intern(); this.hostAddress = in.readString().intern(); - this.address = TransportAddressSerializers.addressFromStream(in); + this.address = new TransportAddress(in); int size = in.readVInt(); this.attributes = new HashMap<>(size); for (int i = 0; i < size; i++) { @@ -242,7 +240,7 @@ public class DiscoveryNode implements Writeable, ToXContent { out.writeString(ephemeralId); out.writeString(hostName); out.writeString(hostAddress); - addressToStream(out, address); + address.writeTo(out); out.writeVInt(attributes.size()); for (Map.Entry entry : attributes.entrySet()) { out.writeString(entry.getKey()); diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java index e8ede54f4a2..fad86caa7cc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java @@ -24,13 +24,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import java.util.HashMap; import java.util.Map; -/** - */ public class DiscoveryNodeFilters { public enum OpType { @@ -82,8 +80,8 @@ public class DiscoveryNodeFilters { if ("_ip".equals(attr)) { // We check both the host_ip or the publish_ip String publishAddress = null; - if (node.getAddress() instanceof InetSocketTransportAddress) { - publishAddress = NetworkAddress.format(((InetSocketTransportAddress) node.getAddress()).address().getAddress()); + if (node.getAddress() instanceof TransportAddress) { + publishAddress = NetworkAddress.format(node.getAddress().address().getAddress()); } boolean match = matchByIP(values, node.getHostAddress(), publishAddress); @@ -116,8 +114,8 @@ public class DiscoveryNodeFilters { } else if ("_publish_ip".equals(attr)) { // We check explicitly only the publish_ip String address = null; - if (node.getAddress() instanceof InetSocketTransportAddress) { - address = NetworkAddress.format(((InetSocketTransportAddress) node.getAddress()).address().getAddress()); + if (node.getAddress() instanceof TransportAddress) { + address = NetworkAddress.format(node.getAddress().address().getAddress()); } boolean match = matchByIP(values, address, null); diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 9d0edf7b910..895195d35b3 
100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -397,16 +397,6 @@ public class DiscoveryNodes extends AbstractDiffable implements @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("{"); - for (DiscoveryNode node : this) { - sb.append(node).append(','); - } - sb.append("}"); - return sb.toString(); - } - - public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("nodes: \n"); for (DiscoveryNode node : this) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index ef3fae48301..94cb4b8c8e8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -68,11 +68,6 @@ public class OperationRouting extends AbstractComponent { return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference); } - public int searchShardsCount(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { - final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); - return shards.size(); - } - public GroupShardsIterator searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); final Set set = new HashSet<>(shards.size()); @@ -126,7 +121,7 @@ public class OperationRouting extends AbstractComponent { Preference preferenceType = Preference.parse(preference); if (preferenceType == Preference.SHARDS) { // starts with _shards, so execute on specific ones - int index = preference.indexOf(';'); + int index = preference.indexOf('|'); String shards; if (index == -1) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index 8403f45a550..4ba277d99cd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -19,18 +19,15 @@ package org.elasticsearch.cluster.routing; -import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.shard.ShardId; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; /** * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards @@ -103,7 +100,8 @@ public class RoutingNode implements Iterable { */ void add(ShardRouting shard) { if (shards.containsKey(shard.shardId())) { - throw new IllegalStateException("Trying to add a shard " + shard.shardId() + " to a node [" + nodeId + "] where it already exists"); + throw new IllegalStateException("Trying to add a shard " + shard.shardId() + " to a node [" + nodeId + + "] where it already exists. current [" + shards.get(shard.shardId()) + "]. 
new [" + shard + "]"); } shards.put(shard.shardId(), shard); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index bd5113029c4..ddb7969f60a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -391,7 +391,8 @@ public class RoutingNodes implements Iterable { return shards; } - public String prettyPrint() { + @Override + public String toString() { StringBuilder sb = new StringBuilder("routing_nodes:\n"); for (RoutingNode routingNode : this) { sb.append(routingNode.prettyPrint()); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 9dd2cc72da8..1c3d629a72f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -109,7 +109,7 @@ public class RoutingService extends AbstractLifecycleComponent { rerouting.set(false); ClusterState state = clusterService.state(); if (logger.isTraceEnabled()) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); } else { logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); } @@ -118,7 +118,7 @@ public class RoutingService extends AbstractLifecycleComponent { } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); - logger.warn((Supplier) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 2d960ce0450..051fd12a12b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -613,7 +613,8 @@ public class RoutingTable implements Iterable, Diffable entry : indicesRouting) { sb.append(entry.value.prettyPrint()).append('\n'); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index e441fd81113..4db922d5aeb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -390,7 +390,7 @@ public final class ShardRouting implements Writeable, ToXContent { assert primary : this; return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.INITIALIZING, StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null), - AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE); + allocationId, UNAVAILABLE_EXPECTED_SHARD_SIZE); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java 
b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 4670e1e4736..3726bac781e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -185,15 +185,15 @@ public final class UnassignedInfo implements ToXContent, Writeable { } } - public static AllocationStatus fromDecision(Decision decision) { + public static AllocationStatus fromDecision(Decision.Type decision) { Objects.requireNonNull(decision); - switch (decision.type()) { + switch (decision) { case NO: return DECIDERS_NO; case THROTTLE: return DECIDERS_THROTTLED; default: - throw new IllegalArgumentException("no allocation attempt from decision[" + decision.type() + "]"); + throw new IllegalArgumentException("no allocation attempt from decision[" + decision + "]"); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 323adf78046..b3eaa517934 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -34,6 +35,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -94,17 +96,24 @@ public class AllocationService extends AbstractComponent { } protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) { - return buildResultAndLogHealthChange(oldState, allocation, reason, new RoutingExplanations()); - } - - protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason, - RoutingExplanations explanations) { RoutingTable oldRoutingTable = oldState.routingTable(); RoutingNodes newRoutingNodes = allocation.routingNodes(); final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build(); MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable); assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata - final ClusterState newState = ClusterState.builder(oldState).routingTable(newRoutingTable).metaData(newMetaData).build(); + final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState) + .routingTable(newRoutingTable) + .metaData(newMetaData); + final RestoreInProgress restoreInProgress = allocation.custom(RestoreInProgress.TYPE); + if (restoreInProgress != null) { + 
RestoreInProgress updatedRestoreInProgress = allocation.updateRestoreInfoWithRoutingChanges(restoreInProgress); + if (updatedRestoreInProgress != restoreInProgress) { + ImmutableOpenMap.Builder customsBuilder = ImmutableOpenMap.builder(allocation.getCustoms()); + customsBuilder.put(RestoreInProgress.TYPE, updatedRestoreInProgress); + newStateBuilder.customs(customsBuilder.build()); + } + } + final ClusterState newState = newStateBuilder.build(); logClusterHealthStateChange( new ClusterStateHealth(oldState), new ClusterStateHealth(newState), diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java index f476972b216..fa30a102bf6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java @@ -93,11 +93,6 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting removeAllocationId(removedRelocationSource); } - @Override - public void startedPrimaryReinitialized(ShardRouting startedPrimaryShard, ShardRouting initializedShard) { - removeAllocationId(startedPrimaryShard); - } - /** * Updates the current {@link MetaData} based on the changes of this RoutingChangesObserver. Specifically * we update {@link IndexMetaData#getInSyncAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on @@ -238,7 +233,7 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting Set oldInSyncAllocations = oldIndexMetaData.inSyncAllocationIds(shardNumber); Set idsToRemove = shardEntry.getValue().stream().map(e -> e.getAllocationId()).collect(Collectors.toSet()); assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) : - "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable.prettyPrint(); + "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable; Set remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove); assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard " + shardEntry.getKey() + " (before: " + oldInSyncAllocations + ", ids to remove: " + idsToRemove + ")"; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 8429493b0e7..886b42f57d6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingChangesObserver; @@ -30,6 +31,8 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; +import 
org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.snapshots.RestoreService.RestoreInProgressUpdater; import java.util.HashMap; import java.util.HashSet; @@ -76,8 +79,9 @@ public class RoutingAllocation { private final IndexMetaDataUpdater indexMetaDataUpdater = new IndexMetaDataUpdater(); private final RoutingNodesChangedObserver nodesChangedObserver = new RoutingNodesChangedObserver(); + private final RestoreInProgressUpdater restoreInProgressUpdater = new RestoreInProgressUpdater(); private final RoutingChangesObserver routingChangesObserver = new RoutingChangesObserver.DelegatingRoutingChangesObserver( - nodesChangedObserver, indexMetaDataUpdater + nodesChangedObserver, indexMetaDataUpdater, restoreInProgressUpdater ); @@ -154,6 +158,10 @@ public class RoutingAllocation { return (T)customs.get(key); } + public ImmutableOpenMap getCustoms() { + return customs; + } + /** * Get explanations of current routing * @return explanation of routing @@ -234,6 +242,13 @@ public class RoutingAllocation { return indexMetaDataUpdater.applyChanges(metaData, newRoutingTable); } + /** + * Returns updated {@link RestoreInProgress} based on the changes that were made to the routing nodes + */ + public RestoreInProgress updateRestoreInfoWithRoutingChanges(RestoreInProgress restoreInProgress) { + return restoreInProgressUpdater.applyChanges(restoreInProgress); + } + /** * Returns true iff changes were made to the routing nodes */ diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java new file mode 100644 index 00000000000..74fd7668a01 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java @@ -0,0 +1,328 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.Nullable; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Represents the allocation decision by an allocator for a shard. 
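The RoutingAllocation hunk above registers the new RestoreInProgressUpdater alongside the existing observers in a single DelegatingRoutingChangesObserver, so every routing change is seen exactly once by each bookkeeping concern (routing nodes, index metadata, restore state). Below is a minimal sketch of that fan-out pattern; the single-method listener interface is invented for illustration and is not the real RoutingChangesObserver API.

    import java.util.Arrays;
    import java.util.List;

    // Illustrative listener; the real RoutingChangesObserver declares many callbacks.
    interface RoutingListener {
        void shardStarted(String shardId);
    }

    final class DelegatingListener implements RoutingListener {
        private final List<RoutingListener> delegates;

        DelegatingListener(RoutingListener... delegates) {
            this.delegates = Arrays.asList(delegates);
        }

        @Override
        public void shardStarted(String shardId) {
            // fan the event out so each concern updates its own state from the same change stream
            for (RoutingListener delegate : delegates) {
                delegate.shardStarted(shardId);
            }
        }
    }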
+ */ +public class ShardAllocationDecision { + /** a constant representing a shard decision where no decision was taken */ + public static final ShardAllocationDecision DECISION_NOT_TAKEN = + new ShardAllocationDecision(null, null, null, null, null, null, null); + /** + * a map of cached common no/throttle decisions that don't need explanations, + * this helps prevent unnecessary object allocations for the non-explain API case + */ + private static final Map CACHED_DECISIONS; + static { + Map cachedDecisions = new HashMap<>(); + cachedDecisions.put(AllocationStatus.FETCHING_SHARD_DATA, + new ShardAllocationDecision(Type.NO, AllocationStatus.FETCHING_SHARD_DATA, null, null, null, null, null)); + cachedDecisions.put(AllocationStatus.NO_VALID_SHARD_COPY, + new ShardAllocationDecision(Type.NO, AllocationStatus.NO_VALID_SHARD_COPY, null, null, null, null, null)); + cachedDecisions.put(AllocationStatus.DECIDERS_NO, + new ShardAllocationDecision(Type.NO, AllocationStatus.DECIDERS_NO, null, null, null, null, null)); + cachedDecisions.put(AllocationStatus.DECIDERS_THROTTLED, + new ShardAllocationDecision(Type.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, null, null, null, null, null)); + cachedDecisions.put(AllocationStatus.DELAYED_ALLOCATION, + new ShardAllocationDecision(Type.NO, AllocationStatus.DELAYED_ALLOCATION, null, null, null, null, null)); + CACHED_DECISIONS = Collections.unmodifiableMap(cachedDecisions); + } + + @Nullable + private final Type finalDecision; + @Nullable + private final AllocationStatus allocationStatus; + @Nullable + private final String finalExplanation; + @Nullable + private final String assignedNodeId; + @Nullable + private final String allocationId; + @Nullable + private final Map nodeDecisions; + @Nullable + private final Decision shardDecision; + + private ShardAllocationDecision(Type finalDecision, + AllocationStatus allocationStatus, + String finalExplanation, + String assignedNodeId, + String allocationId, + Map nodeDecisions, + Decision shardDecision) { + assert assignedNodeId != null || finalDecision == null || finalDecision != Type.YES : + "a yes decision must have a node to assign the shard to"; + assert allocationStatus != null || finalDecision == null || finalDecision == Type.YES : + "only a yes decision should not have an allocation status"; + assert allocationId == null || assignedNodeId != null : + "allocation id can only be null if the assigned node is null"; + this.finalDecision = finalDecision; + this.allocationStatus = allocationStatus; + this.finalExplanation = finalExplanation; + this.assignedNodeId = assignedNodeId; + this.allocationId = allocationId; + this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null; + this.shardDecision = shardDecision; + } + + /** + * Returns a NO decision with the given shard-level decision and explanation (if in explain mode). + */ + public static ShardAllocationDecision no(Decision shardDecision, @Nullable String explanation) { + if (explanation != null) { + return new ShardAllocationDecision(Type.NO, AllocationStatus.DECIDERS_NO, explanation, null, null, null, shardDecision); + } else { + return getCachedDecision(AllocationStatus.DECIDERS_NO); + } + } + + /** + * Returns a NO decision with the given {@link AllocationStatus} and explanation for the NO decision, if in explain mode. 
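The CACHED_DECISIONS map built in the static block above acts as a small flyweight cache: in the common non-explain case the factory methods can hand back a shared immutable instance keyed by AllocationStatus instead of allocating a fresh decision object per shard. A sketch of the idiom with simplified stand-in types (not the real Elasticsearch classes):

    import java.util.Collections;
    import java.util.EnumMap;
    import java.util.Map;

    final class CachedDecision {
        enum Status { DECIDERS_NO, DECIDERS_THROTTLED, DELAYED_ALLOCATION }

        private static final Map<Status, CachedDecision> CACHED;
        static {
            Map<Status, CachedDecision> cache = new EnumMap<>(Status.class);
            for (Status status : Status.values()) {
                cache.put(status, new CachedDecision(status));
            }
            CACHED = Collections.unmodifiableMap(cache);
        }

        private final Status status;

        private CachedDecision(Status status) {
            this.status = status;
        }

        // explanation-free decisions share one immutable instance per status
        static CachedDecision of(Status status) {
            return CACHED.get(status);
        }
    }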
+ */ + public static ShardAllocationDecision no(AllocationStatus allocationStatus, @Nullable String explanation) { + return no(allocationStatus, explanation, null); + } + + /** + * Returns a NO decision with the given {@link AllocationStatus}, and the explanation for the NO decision + * as well as the individual node-level decisions that comprised the final NO decision if in explain mode. + */ + public static ShardAllocationDecision no(AllocationStatus allocationStatus, @Nullable String explanation, + @Nullable Map nodeDecisions) { + Objects.requireNonNull(allocationStatus, "allocationStatus must not be null"); + if (explanation != null) { + return new ShardAllocationDecision(Type.NO, allocationStatus, explanation, null, null, asExplanations(nodeDecisions), null); + } else { + return getCachedDecision(allocationStatus); + } + } + + /** + * Returns a THROTTLE decision, with the given explanation and individual node-level decisions that + * comprised the final THROTTLE decision if in explain mode. + */ + public static ShardAllocationDecision throttle(@Nullable String explanation, @Nullable Map nodeDecisions) { + if (explanation != null) { + return new ShardAllocationDecision(Type.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null, + asExplanations(nodeDecisions), null); + } else { + return getCachedDecision(AllocationStatus.DECIDERS_THROTTLED); + } + } + + /** + * Creates a YES decision with the given explanation and individual node-level decisions that + * comprised the final YES decision, along with the node id to which the shard is assigned and + * the allocation id for the shard, if available. + */ + public static ShardAllocationDecision yes(String assignedNodeId, @Nullable String explanation, @Nullable String allocationId, + @Nullable Map nodeDecisions) { + Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null"); + return new ShardAllocationDecision(Type.YES, null, explanation, assignedNodeId, allocationId, asExplanations(nodeDecisions), null); + } + + /** + * Creates a {@link ShardAllocationDecision} from the given {@link Decision} and the assigned node, if any. + */ + public static ShardAllocationDecision fromDecision(Decision decision, @Nullable String assignedNodeId, boolean explain, + @Nullable Map nodeDecisions) { + final Type decisionType = decision.type(); + AllocationStatus allocationStatus = decisionType != Type.YES ? 
AllocationStatus.fromDecision(decisionType) : null; + String explanation = null; + if (explain) { + if (decision.type() == Type.YES) { + assert assignedNodeId != null; + explanation = "shard assigned to node [" + assignedNodeId + "]"; + } else if (decision.type() == Type.THROTTLE) { + assert assignedNodeId != null; + explanation = "shard assignment throttled on node [" + assignedNodeId + "]"; + } else { + explanation = "shard cannot be assigned to any node in the cluster"; + } + } + return new ShardAllocationDecision(decisionType, allocationStatus, explanation, assignedNodeId, null, nodeDecisions, null); + } + + private static ShardAllocationDecision getCachedDecision(AllocationStatus allocationStatus) { + ShardAllocationDecision decision = CACHED_DECISIONS.get(allocationStatus); + return Objects.requireNonNull(decision, "precomputed decision not found for " + allocationStatus); + } + + private static Map asExplanations(Map decisionMap) { + if (decisionMap != null) { + Map explanationMap = new HashMap<>(); + for (Map.Entry entry : decisionMap.entrySet()) { + explanationMap.put(entry.getKey(), new WeightedDecision(entry.getValue(), Float.POSITIVE_INFINITY)); + } + return explanationMap; + } + return null; + } + + /** + * Returns true if a decision was taken by the allocator, {@code false} otherwise. + * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}. + */ + public boolean isDecisionTaken() { + return finalDecision != null; + } + + /** + * Returns the final decision made by the allocator on whether to assign the shard. + * This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}. + */ + @Nullable + public Type getFinalDecisionType() { + return finalDecision; + } + + /** + * Returns the final decision made by the allocator on whether to assign the shard. + * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will + * throw an {@code IllegalArgumentException}. + */ + public Type getFinalDecisionSafe() { + if (isDecisionTaken() == false) { + throw new IllegalArgumentException("decision must have been taken in order to return the final decision"); + } + return finalDecision; + } + + /** + * Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if + * no decision was taken or if the decision was {@link Decision.Type#YES}. + */ + @Nullable + public AllocationStatus getAllocationStatus() { + return allocationStatus; + } + + /** + * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecisionType()}. + */ + @Nullable + public String getFinalExplanation() { + return finalExplanation; + } + + /** + * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecisionType()} returns + * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}. + */ + @Nullable + public String getAssignedNodeId() { + return assignedNodeId; + } + + /** + * Gets the allocation id for the existing shard copy that the allocator is assigning the shard to. + * This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value + * and the node on which the shard is assigned already has a shard copy with an in-sync allocation id + * that we can re-use. 
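Tying the factory methods above together: callers outside explain mode pass a null explanation and receive the cached constants, so the hot allocation path creates no garbage, while a non-null explanation forces a dedicated instance to carry it. A usage sketch, assuming the ShardAllocationDecision and AllocationStatus classes from this diff are on the classpath:

    import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
    import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;

    public final class CachedDecisionDemo {
        public static void main(String[] args) {
            // non-explain path: a null explanation yields the shared cached instance
            ShardAllocationDecision first = ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO, null);
            ShardAllocationDecision second = ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO, null);
            System.out.println(first == second); // true

            // explain path: the explanation forces a dedicated instance carrying the text
            ShardAllocationDecision explained = ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO, "all deciders said no");
            System.out.println(explained == first);              // false
            System.out.println(explained.getFinalExplanation()); // all deciders said no
        }
    }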
+ */ + @Nullable + public String getAllocationId() { + return allocationId; + } + + /** + * Gets the individual node-level decisions that went into making the final decision as represented by + * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link Decision} + * as the decision for the given node. + */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } + + /** + * Gets the decision on allocating a shard, without examining any specific nodes to allocate to + * (e.g. a replica can never be allocated if the primary is not allocated, so this is a shard-level + * decision, not having taken any node into account). + */ + @Nullable + public Decision getShardDecision() { + return shardDecision; + } + + /** + * This class represents the shard allocation decision for a single node, + * including the {@link Decision} whether to allocate to the node and the + * weight assigned to the node for the shard in question. + */ + public static final class WeightedDecision { + + private final Decision decision; + private final float weight; + + public WeightedDecision(Decision decision) { + this.decision = Objects.requireNonNull(decision); + this.weight = Float.POSITIVE_INFINITY; + } + + public WeightedDecision(Decision decision, float weight) { + this.decision = Objects.requireNonNull(decision); + this.weight = Objects.requireNonNull(weight); + } + + /** + * The decision for allocating to the node. + */ + public Decision getDecision() { + return decision; + } + + /** + * The calculated weight for allocating a shard to the node. A value of {@link Float#POSITIVE_INFINITY} + * means the weight was not calculated or factored into the decision. + */ + public float getWeight() { + return weight; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + WeightedDecision that = (WeightedDecision) other; + return decision.equals(that.decision) && Float.compare(weight, that.weight) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(decision, weight); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java deleted file mode 100644 index 172360849fa..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.routing.allocation; - -import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.common.Nullable; - -import java.util.Collections; -import java.util.Map; -import java.util.Objects; - -/** - * Represents the allocation decision by an allocator for an unassigned shard. - */ -public class UnassignedShardDecision { - /** a constant representing a shard decision where no decision was taken */ - public static final UnassignedShardDecision DECISION_NOT_TAKEN = - new UnassignedShardDecision(null, null, null, null, null, null); - - @Nullable - private final Decision finalDecision; - @Nullable - private final AllocationStatus allocationStatus; - @Nullable - private final String finalExplanation; - @Nullable - private final String assignedNodeId; - @Nullable - private final String allocationId; - @Nullable - private final Map nodeDecisions; - - private UnassignedShardDecision(Decision finalDecision, - AllocationStatus allocationStatus, - String finalExplanation, - String assignedNodeId, - String allocationId, - Map nodeDecisions) { - assert finalExplanation != null || finalDecision == null : - "if a decision was taken, there must be an explanation for it"; - assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES : - "a yes decision must have a node to assign the shard to"; - assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES : - "only a yes decision should not have an allocation status"; - assert allocationId == null || assignedNodeId != null : - "allocation id can only be null if the assigned node is null"; - this.finalDecision = finalDecision; - this.allocationStatus = allocationStatus; - this.finalExplanation = finalExplanation; - this.assignedNodeId = assignedNodeId; - this.allocationId = allocationId; - this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null; - } - - /** - * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision. - */ - public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) { - return noDecision(allocationStatus, explanation, null); - } - - /** - * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision, - * as well as the individual node-level decisions that comprised the final NO decision. - */ - public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, - String explanation, - @Nullable Map nodeDecisions) { - Objects.requireNonNull(explanation, "explanation must not be null"); - Objects.requireNonNull(allocationStatus, "allocationStatus must not be null"); - return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions); - } - - /** - * Creates a THROTTLE decision with the given explanation and individual node-level decisions that - * comprised the final THROTTLE decision. 
- */ - public static UnassignedShardDecision throttleDecision(String explanation, - Map nodeDecisions) { - Objects.requireNonNull(explanation, "explanation must not be null"); - return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null, - nodeDecisions); - } - - /** - * Creates a YES decision with the given explanation and individual node-level decisions that - * comprised the final YES decision, along with the node id to which the shard is assigned and - * the allocation id for the shard, if available. - */ - public static UnassignedShardDecision yesDecision(String explanation, - String assignedNodeId, - @Nullable String allocationId, - Map nodeDecisions) { - Objects.requireNonNull(explanation, "explanation must not be null"); - Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null"); - return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions); - } - - /** - * Returns true if a decision was taken by the allocator, {@code false} otherwise. - * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}. - */ - public boolean isDecisionTaken() { - return finalDecision != null; - } - - /** - * Returns the final decision made by the allocator on whether to assign the unassigned shard. - * This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}. - */ - @Nullable - public Decision getFinalDecision() { - return finalDecision; - } - - /** - * Returns the final decision made by the allocator on whether to assign the unassigned shard. - * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will - * throw an {@code IllegalArgumentException}. - */ - public Decision getFinalDecisionSafe() { - if (isDecisionTaken() == false) { - throw new IllegalArgumentException("decision must have been taken in order to return the final decision"); - } - return finalDecision; - } - - /** - * Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if - * no decision was taken or if the decision was {@link Decision.Type#YES}. - */ - @Nullable - public AllocationStatus getAllocationStatus() { - return allocationStatus; - } - - /** - * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}. - */ - @Nullable - public String getFinalExplanation() { - return finalExplanation; - } - - /** - * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}. - * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will - * throw an {@code IllegalArgumentException}. - */ - public String getFinalExplanationSafe() { - if (isDecisionTaken() == false) { - throw new IllegalArgumentException("decision must have been taken in order to return the final explanation"); - } - return finalExplanation; - } - - /** - * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecision()} returns - * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}. - */ - @Nullable - public String getAssignedNodeId() { - return assignedNodeId; - } - - /** - * Gets the allocation id for the existing shard copy that the allocator is assigning the shard to. 
- * This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value - * and the node on which the shard is assigned already has a shard copy with an in-sync allocation id - * that we can re-use. - */ - @Nullable - public String getAllocationId() { - return allocationId; - } - - /** - * Gets the individual node-level decisions that went into making the final decision as represented by - * {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision} - * as the decision for the given node. - */ - @Nullable - public Map getNodeDecisions() { - return nodeDecisions; - } -} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 7087ae57c4b..3a6c1c45f01 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -31,10 +31,13 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision.WeightedDecision; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -51,6 +54,7 @@ import java.util.HashSet; import java.util.IdentityHashMap; import java.util.Iterator; import java.util.Map; +import java.util.Objects; import java.util.Set; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -73,9 +77,9 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { public static final Setting INDEX_BALANCE_FACTOR_SETTING = - Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope); + Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, 0.0f, Property.Dynamic, Property.NodeScope); public static final Setting SHARD_BALANCE_FACTOR_SETTING = - Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope); + Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, 0.0f, Property.Dynamic, Property.NodeScope); public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, Property.Dynamic, Property.NodeScope); @@ -122,6 +126,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards balancer.balance(); } + /** + * Returns a decision on rebalancing a single shard to form a more optimal cluster balance. 
This + * method is not used in itself for cluster rebalancing because all shards from all indices are + * taken into account when making rebalancing decisions. This method is only intended to be used + * from the cluster allocation explain API to explain possible rebalancing decisions for a single + * shard. + */ + public RebalanceDecision decideRebalance(final ShardRouting shard, final RoutingAllocation allocation) { + assert allocation.debugDecision() : "debugDecision should be set in explain mode"; + return new Balancer(logger, allocation, weightFunction, threshold).decideRebalance(shard); + } + /** * Returns the currently configured delta threshold */ @@ -210,7 +226,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards */ public static class Balancer { private final Logger logger; - private final Map nodes = new HashMap<>(); + private final Map nodes; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; private final WeightFunction weight; @@ -218,6 +234,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards private final float threshold; private final MetaData metaData; private final float avgShardsPerNode; + private final NodeSorter sorter; public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { this.logger = logger; @@ -227,7 +244,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards this.routingNodes = allocation.routingNodes(); this.metaData = allocation.metaData(); avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size(); - buildModelFromAssigned(); + nodes = Collections.unmodifiableMap(buildModelFromAssigned()); + sorter = newNodeSorter(); } /** @@ -261,11 +279,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards return new NodeSorter(nodesArray(), weight, this); } + /** + * The absolute value difference between two weights. + */ private static float absDelta(float lower, float higher) { assert higher >= lower : higher + " lt " + lower +" but was expected to be gte"; return Math.abs(higher - lower); } + /** + * Returns {@code true} iff the weight delta between two nodes is under a defined threshold. + * See {@link #THRESHOLD_SETTING} for defining the threshold. + */ private static boolean lessThan(float delta, float threshold) { /* deltas close to the threshold are "rounded" to the threshold manually to prevent floating point problems if the delta is very close to the @@ -303,12 +328,115 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards balanceByWeights(); } + /** + * Makes a decision about moving a single shard to a different node to form a more + * optimally balanced cluster. This method is invoked from the cluster allocation + * explain API only. 
+ */ + private RebalanceDecision decideRebalance(final ShardRouting shard) { + if (shard.started() == false) { + // cannot rebalance a shard that isn't started + return RebalanceDecision.NOT_TAKEN; + } + + Decision canRebalance = allocation.deciders().canRebalance(shard, allocation); + + if (allocation.hasPendingAsyncFetch()) { + return new RebalanceDecision( + canRebalance, + Type.NO, + "cannot rebalance due to in-flight shard store fetches, otherwise allocation may prematurely rebalance a shard to " + + "a node that is soon to receive another shard assignment upon completion of the shard store fetch, " + + "rendering the cluster imbalanced again" + ); + } + + sorter.reset(shard.getIndexName()); + ModelNode[] modelNodes = sorter.modelNodes; + final String currentNodeId = shard.currentNodeId(); + // find currently assigned node + ModelNode currentNode = null; + for (ModelNode node : modelNodes) { + if (node.getNodeId().equals(currentNodeId)) { + currentNode = node; + break; + } + } + assert currentNode != null : "currently assigned node could not be found"; + + // balance the shard, if a better node can be found + final float currentWeight = sorter.weight(currentNode); + final AllocationDeciders deciders = allocation.deciders(); + final String idxName = shard.getIndexName(); + Map<String, NodeRebalanceDecision> nodeDecisions = new HashMap<>(modelNodes.length - 1); + Type rebalanceDecisionType = Type.NO; + String assignedNodeId = null; + for (ModelNode node : modelNodes) { + if (node == currentNode) { + continue; // skip the node the shard is currently allocated to + } + final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation); + // the current weight of the node in the cluster, as computed by the weight function; + // this is a comparison of the number of shards on this node to the number of shards + // that should be on each node on average (both taking the cluster as a whole into account + // as well as shards per index) + final float nodeWeight = sorter.weight(node); + // if the node we are examining has a worse (higher) weight than the node the shard is + // assigned to, then there is no way moving the shard to the node with the worse weight + // can make the balance of the cluster better, so we check for that here + final boolean betterWeightThanCurrent = nodeWeight <= currentWeight; + boolean rebalanceConditionsMet = false; + boolean deltaAboveThreshold = false; + float weightWithShardAdded = Float.POSITIVE_INFINITY; + if (betterWeightThanCurrent) { + // get the delta between the weights of the node we are checking and the node that holds the shard + final float currentDelta = absDelta(nodeWeight, currentWeight); + // check if the weight delta is above the configured threshold; if it is not, + // then even though the node we are examining has a better weight and may make the cluster balance + // more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless + // the gains make it worth it, as defined by the threshold + deltaAboveThreshold = lessThan(currentDelta, threshold) == false; + // simulate the weight of the node if we were to relocate the shard to it + weightWithShardAdded = weight.weightShardAdded(this, node, idxName); + // calculate the delta of the weights of the two nodes if we were to add the shard to the + // node in question and move it away from the node that currently holds it.
+ final float proposedDelta = weightWithShardAdded - weight.weightShardRemoved(this, currentNode, idxName); + rebalanceConditionsMet = deltaAboveThreshold && proposedDelta < currentDelta; + // if the simulated weight delta with the shard moved away is better than the weight delta + // with the shard remaining on the current node, and we are allowed to allocate to the + // node in question, then allow the rebalance + if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) { + // rebalance to this node; the choice will only get overwritten if the decision here is + // THROTTLE and we get a YES decision on another node + rebalanceDecisionType = canAllocate.type(); + assignedNodeId = node.getNodeId(); + } + } + nodeDecisions.put(node.getNodeId(), new NodeRebalanceDecision( + rebalanceConditionsMet ? canAllocate.type() : Type.NO, + canAllocate, + betterWeightThanCurrent, + deltaAboveThreshold, + nodeWeight, + weightWithShardAdded) + ); + } + + + if (canRebalance.type() != Type.YES) { + return new RebalanceDecision(canRebalance, canRebalance.type(), "rebalancing is not allowed", null, + nodeDecisions, currentWeight); + } else { + return RebalanceDecision.decision(canRebalance, rebalanceDecisionType, assignedNodeId, + nodeDecisions, currentWeight, threshold); + } + } + public Map<DiscoveryNode, Float> weighShard(ShardRouting shard) { - final NodeSorter sorter = newNodeSorter(); final ModelNode[] modelNodes = sorter.modelNodes; final float[] weights = sorter.weights; - buildWeightOrderedIndices(sorter); + buildWeightOrderedIndices(); Map<DiscoveryNode, Float> nodes = new HashMap<>(modelNodes.length); float currentNodeWeight = 0.0f; for (int i = 0; i < modelNodes.length; i++) { @@ -332,20 +460,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * weight of the maximum node and the minimum node according to the * {@link WeightFunction}. This weight is calculated per index to * distribute shards evenly per index. The balancer tries to relocate - * shards only if the delta exceeds the threshold. If the default case + * shards only if the delta exceeds the threshold. In the default case * the threshold is set to 1.0 to enforce gaining relocation * only, or in other words relocations that move the weight delta closer * to 0.0 */ private void balanceByWeights() { - final NodeSorter sorter = newNodeSorter(); final AllocationDeciders deciders = allocation.deciders(); final ModelNode[] modelNodes = sorter.modelNodes; final float[] weights = sorter.weights; - for (String index : buildWeightOrderedIndices(sorter)) { + for (String index : buildWeightOrderedIndices()) { IndexMetaData indexMetaData = metaData.index(index); - // find nodes that have a shard of this index or where shards of this index are allowed to stay + // find nodes that have a shard of this index or where shards of this index are allowed to be allocated to, // move these nodes to the front of modelNodes so that we can only balance based on these nodes int relevantNodes = 0; for (int i = 0; i < modelNodes.length; i++) { @@ -440,14 +567,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * allocations on added nodes from one index when the weight parameters * for global balance overrule the index balance at an intermediate * state. For example this can happen if we have 3 nodes and 3 indices - * with 3 shards and 1 shard. At the first stage all three nodes hold - 2 shard for each index. 
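The canAllocate.type().higherThan(rebalanceDecisionType) comparison in the loop above presumes an ordering of decision types in which YES outranks THROTTLE, which outranks NO, so a later YES candidate displaces an earlier THROTTLE one. A stand-in enum sketch of such an ordering (the real Decision.Type defines its own comparison; this is not its implementation):

    // Stand-in enum; declaration order encodes NO < THROTTLE < YES.
    enum DecisionTypeSketch {
        NO, THROTTLE, YES;

        boolean higherThan(DecisionTypeSketch other) {
            return this.ordinal() > other.ordinal();
        }
    }

    final class BestDecisionDemo {
        public static void main(String[] args) {
            DecisionTypeSketch best = DecisionTypeSketch.NO;
            for (DecisionTypeSketch candidate : new DecisionTypeSketch[] {
                    DecisionTypeSketch.THROTTLE, DecisionTypeSketch.YES }) {
                if (candidate.higherThan(best)) {
                    best = candidate; // a THROTTLE candidate is kept only until a YES appears
                }
            }
            System.out.println(best); // YES
        }
    }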
now we add another node and the first index - is balanced moving 3 two of the nodes over to the new node since it + * with 3 primary and 1 replica shards. At the first stage all three nodes hold + 2 shards for each index. Now we add another node and the first index + is balanced moving three shards from two of the nodes over to the new node since it + has no shards yet and global balance for the node is way below + * average. To re-balance we need to move shards back eventually likely + * to the nodes we relocated them from. */ - private String[] buildWeightOrderedIndices(NodeSorter sorter) { + private String[] buildWeightOrderedIndices() { final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); final float[] deltas = new float[indices.length]; for (int i = 0; i < deltas.length; i++) { @@ -501,27 +628,52 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are // offloading the shards. - final NodeSorter sorter = newNodeSorter(); for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) { ShardRouting shardRouting = it.next(); - // we can only move started shards... - if (shardRouting.started()) { + final MoveDecision moveDecision = makeMoveDecision(shardRouting); + if (moveDecision.move()) { final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - assert sourceNode != null && sourceNode.containsShard(shardRouting); - RoutingNode routingNode = sourceNode.getRoutingNode(); - Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); - if (decision.type() == Decision.Type.NO) { - moveShard(sorter, shardRouting, sourceNode, routingNode); + final ModelNode targetNode = nodes.get(moveDecision.getAssignedNodeId()); + sourceNode.removeShard(shardRouting); + Tuple<ShardRouting, ShardRouting> relocatingShards = routingNodes.relocateShard(shardRouting, targetNode.getNodeId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + targetNode.addShard(relocatingShards.v2()); + if (logger.isTraceEnabled()) { + logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); } + } else if (moveDecision.cannotRemain()) { + logger.trace("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); } } } /** - * Move started shard to the minimal eligible node with respect to the weight function + * Makes a decision on whether to move a started shard to another node. The following rules apply + * to the {@link MoveDecision} return object: + * 1. If the shard is not started, no decision will be taken and {@link MoveDecision#isDecisionTaken()} will return false. + * 2. If the shard is allowed to remain on its current node, no attempt will be made to move the shard and + * {@link MoveDecision#canRemainDecision} will have a decision type of YES. All other fields in the object will be null. + * 3. If the shard is not allowed to remain on its current node, then {@link MoveDecision#finalDecision} will be populated + * with the decision of moving to another node. If {@link MoveDecision#finalDecision} returns YES, then + * {@link MoveDecision#assignedNodeId} will return a non-null value, otherwise the assignedNodeId will be null. + * 4. 
If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then + * {@link MoveDecision#finalExplanation} and {@link MoveDecision#nodeDecisions} will have non-null values. */ - private void moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) { - logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node()); + public MoveDecision makeMoveDecision(final ShardRouting shardRouting) { + if (shardRouting.started() == false) { + // we can only move started shards + return MoveDecision.NOT_TAKEN; + } + + final boolean explain = allocation.debugDecision(); + final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + assert sourceNode != null && sourceNode.containsShard(shardRouting); + RoutingNode routingNode = sourceNode.getRoutingNode(); + Decision canRemain = allocation.deciders().canRemain(shardRouting, routingNode, allocation); + if (canRemain.type() != Decision.Type.NO) { + return MoveDecision.stay(canRemain, explain); + } + sorter.reset(shardRouting.getIndexName()); /* * the sorter holds the minimum weight node first for the shards index. @@ -529,23 +681,34 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * This is not guaranteed to be balanced after this operation we still try best effort to * allocate on the minimal eligible node. */ + Type bestDecision = Type.NO; + RoutingNode targetNode = null; + final Map nodeExplanationMap = explain ? new HashMap<>() : null; for (ModelNode currentNode : sorter.modelNodes) { if (currentNode != sourceNode) { RoutingNode target = currentNode.getRoutingNode(); // don't use canRebalance as we want hard filtering rules to apply. See #17698 Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); - if (allocationDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too? - sourceNode.removeShard(shardRouting); - Tuple relocatingShards = routingNodes.relocateShard(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); - currentNode.addShard(relocatingShards.v2()); - if (logger.isTraceEnabled()) { - logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node()); + if (explain) { + nodeExplanationMap.put(currentNode.getNodeId(), new WeightedDecision(allocationDecision, sorter.weight(currentNode))); + } + // TODO maybe we can respect throttling here too? + if (allocationDecision.type().higherThan(bestDecision)) { + bestDecision = allocationDecision.type(); + if (bestDecision == Type.YES) { + targetNode = target; + if (explain == false) { + // we are not in explain mode and already have a YES decision on the best weighted node, + // no need to continue iterating + break; + } } - return; } } } - logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + + return MoveDecision.decision(canRemain, bestDecision, explain, shardRouting.currentNodeId(), + targetNode != null ? targetNode.nodeId() : null, nodeExplanationMap); } /** @@ -557,7 +720,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * on the target node which we respect during the allocation / balancing * process. In short, this method recreates the status-quo in the cluster. 
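To connect makeMoveDecision back to the rewritten moveShards loop earlier in this hunk, here is a condensed caller-side fragment. It is illustrative rather than compilable on its own, since makeMoveDecision is a private Balancer method and the relocation call is elided:

    // Assumes the MoveDecision API introduced in this diff.
    MoveDecision moveDecision = makeMoveDecision(shardRouting);
    if (moveDecision.isDecisionTaken() == false) {
        // shard is not started, so nothing was decided
    } else if (moveDecision.move()) {
        // canRemain was NO and some node answered YES: relocate the shard
        String targetNodeId = moveDecision.getAssignedNodeId();
        // ... relocate shardRouting to targetNodeId (elided) ...
    } else if (moveDecision.cannotRemain()) {
        // canRemain was NO but no target is available right now (NO or THROTTLE everywhere)
    }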
*/ - private void buildModelFromAssigned() { + private Map buildModelFromAssigned() { + Map nodes = new HashMap<>(); for (RoutingNode rn : routingNodes) { ModelNode node = new ModelNode(rn); nodes.put(rn.nodeId(), node); @@ -572,6 +736,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } } } + return nodes; } /** @@ -626,116 +791,61 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards do { for (int i = 0; i < primaryLength; i++) { ShardRouting shard = primary[i]; - if (!shard.primary()) { - final Decision decision = deciders.canAllocate(shard, allocation); - if (decision.type() == Type.NO) { - UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision); - unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); - while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); - } - continue; - } else { + ShardAllocationDecision allocationDecision = decideAllocateUnassigned(shard, throttledNodes); + final Type decisionType = allocationDecision.getFinalDecisionType(); + final String assignedNodeId = allocationDecision.getAssignedNodeId(); + final ModelNode minNode = assignedNodeId != null ? nodes.get(assignedNodeId) : null; + + if (decisionType == Type.YES) { + if (logger.isTraceEnabled()) { + logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); + } + + final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); + minNode.addShard(shard); + if (!shard.primary()) { + // copy over the same replica shards to the secondary array so they will get allocated + // in a subsequent iteration, allowing replicas of other shards to be allocated first while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) { secondary[secondaryLength++] = primary[++i]; } } - } - assert !shard.assignedToNode() : shard; - /* find an node with minimal weight we can allocate on*/ - float minWeight = Float.POSITIVE_INFINITY; - ModelNode minNode = null; - Decision decision = null; - if (throttledNodes.size() < nodes.size()) { - /* Don't iterate over an identity hashset here the - * iteration order is different for each run and makes testing hard */ - for (ModelNode node : nodes.values()) { - if (throttledNodes.contains(node)) { - continue; - } - if (!node.containsShard(shard)) { - // simulate weight if we would add shard to node - float currentWeight = weight.weightShardAdded(this, node, shard.getIndexName()); - /* - * Unless the operation is not providing any gains we - * don't check deciders - */ - if (currentWeight <= minWeight) { - Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation); - NOUPDATE: - if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { - if (currentWeight == minWeight) { - /* we have an equal weight tie breaking: - * 1. if one decision is YES prefer it - * 2. prefer the node that holds the primary for this index with the next id in the ring ie. - * for the 3 shards 2 replica case we try to build up: - * 1 2 0 - * 2 0 1 - * 0 1 2 - * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater - * than the id of the shard we need to assign. 
This works find when new indices are created since - * primaries are added first and we only add one shard set a time in this algorithm. - */ - if (currentDecision.type() == decision.type()) { - final int repId = shard.id(); - final int nodeHigh = node.highestPrimary(shard.index().getName()); - final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); - if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh)) - || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) { - // nothing to set here; the minNode, minWeight, and decision get set below - } else { - break NOUPDATE; - } - } else if (currentDecision.type() != Type.YES) { - break NOUPDATE; - } - } - minNode = node; - minWeight = currentWeight; - decision = currentDecision; - } - } - } + } else { + // did *not* receive a YES decision + if (logger.isTraceEnabled()) { + logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decisionType); } - } - assert (decision == null) == (minNode == null); - if (minNode != null) { - final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - if (decision.type() == Type.YES) { - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); - } - shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); - minNode.addShard(shard); - continue; // don't add to ignoreUnassigned - } else { + if (minNode != null) { + // throttle decision scenario + assert decisionType == Type.THROTTLE; + final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); final RoutingNode node = minNode.getRoutingNode(); final Decision.Type nodeLevelDecision = deciders.canAllocate(node, allocation).type(); if (nodeLevelDecision != Type.YES) { if (logger.isTraceEnabled()) { - logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type()); + logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decisionType); } assert nodeLevelDecision == Type.NO; throttledNodes.add(minNode); } + } else { + assert decisionType == Type.NO; + if (logger.isTraceEnabled()) { + logger.trace("No Node found to assign shard [{}]", shard); + } } - if (logger.isTraceEnabled()) { - logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type()); - } - } else if (logger.isTraceEnabled()) { - logger.trace("No Node found to assign shard [{}]", shard); - } - assert decision == null || decision.type() == Type.THROTTLE; - UnassignedInfo.AllocationStatus allocationStatus = - decision == null ? 
UnassignedInfo.AllocationStatus.DECIDERS_NO : - UnassignedInfo.AllocationStatus.fromDecision(decision); - unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); - if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas - while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) { - unassigned.ignoreShard(secondary[--secondaryLength], allocationStatus, allocation.changes()); + + UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decisionType); + unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); + if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas + while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) { + unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); + } } } } @@ -748,6 +858,110 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards // clear everything we have either added it or moved to ignoreUnassigned } + /** + * Make a decision for allocating an unassigned shard. The returned {@link ShardAllocationDecision} contains + * the {@link Decision} taken to allocate the unassigned shard and, when the decision allows an assignment, the + * node that the shard should be assigned to. If the decision returned + * is of type {@link Type#NO}, then the assigned node will be null. + */ + private ShardAllocationDecision decideAllocateUnassigned(final ShardRouting shard, final Set<ModelNode> throttledNodes) { + if (shard.assignedToNode()) { + // we only make decisions for unassigned shards here + return ShardAllocationDecision.DECISION_NOT_TAKEN; + } + + Decision shardLevelDecision = allocation.deciders().canAllocate(shard, allocation); + if (shardLevelDecision.type() == Type.NO) { + // NO decision for allocating the shard, irrespective of any particular node, so exit early + return ShardAllocationDecision.no(shardLevelDecision, explain("cannot allocate shard in its current state")); + } + + /* find a node with minimal weight we can allocate on */ + float minWeight = Float.POSITIVE_INFINITY; + ModelNode minNode = null; + Decision decision = null; + final boolean explain = allocation.debugDecision(); + if (throttledNodes.size() >= nodes.size() && explain == false) { + // all nodes are throttled, so we know we won't be able to allocate this round, + // so if we are not in explain mode, short circuit + return ShardAllocationDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null); + } + /* Don't iterate over an identity hashset here, as the + * iteration order is different for each run and makes testing hard */ + Map<String, WeightedDecision> nodeExplanationMap = explain ? 
new HashMap<>() : null; + for (ModelNode node : nodes.values()) { + if ((throttledNodes.contains(node) || node.containsShard(shard)) && explain == false) { + // decision is NO without needing to check anything further, so short circuit + continue; + } + + // simulate weight if we would add shard to node + float currentWeight = weight.weightShardAdded(this, node, shard.getIndexName()); + // moving the shard would not improve the balance, and we are not in explain mode, so short circuit + if (currentWeight > minWeight && explain == false) { + continue; + } + + Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation); + if (explain) { + nodeExplanationMap.put(node.getNodeId(), new WeightedDecision(currentDecision, currentWeight)); + } + if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { + final boolean updateMinNode; + if (currentWeight == minWeight) { + /* we have an equal weight tie breaking: + * 1. if one decision is YES prefer it + * 2. prefer the node that holds the primary for this index with the next id in the ring ie. + * for the 3 shards 2 replica case we try to build up: + * 1 2 0 + * 2 0 1 + * 0 1 2 + * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater + * than the id of the shard we need to assign. This works find when new indices are created since + * primaries are added first and we only add one shard set a time in this algorithm. + */ + if (currentDecision.type() == decision.type()) { + final int repId = shard.id(); + final int nodeHigh = node.highestPrimary(shard.index().getName()); + final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); + updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId) + || (nodeHigh < repId && minNodeHigh < repId)) + && (nodeHigh < minNodeHigh)) + || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)); + } else { + updateMinNode = currentDecision.type() == Type.YES; + } + } else { + updateMinNode = true; + } + if (updateMinNode) { + minNode = node; + minWeight = currentWeight; + decision = currentDecision; + } + } + } + if (decision == null) { + // decision was not set and a node was not assigned, so treat it as a NO decision + decision = Decision.NO; + } + return ShardAllocationDecision.fromDecision( + decision, + minNode != null ? minNode.getNodeId() : null, + explain, + nodeExplanationMap + ); + } + + // provide an explanation, if in explain mode + private String explain(String explanation) { + if (allocation.debugDecision()) { + return explanation; + } else { + return null; + } + } + /** * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the * balance model. 
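
The tie-breaking expression above is dense, so here is a standalone sketch that isolates the ring-preference
predicate; `preferNode` and the hard-coded ids are illustrative only and not part of this change:

-------------------------------------
public class TieBreakSketch {
    /**
     * Mirrors the ring-preference expression from decideAllocateUnassigned: prefer the candidate
     * node whose highest primary id for this index is the smallest id greater than the shard id
     * being assigned, wrapping around the ring if no such id exists.
     */
    static boolean preferNode(int repId, int nodeHigh, int minNodeHigh) {
        return ((((nodeHigh > repId && minNodeHigh > repId)
                      || (nodeHigh < repId && minNodeHigh < repId))
                     && (nodeHigh < minNodeHigh))
                    || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId));
    }

    public static void main(String[] args) {
        // assigning replica id 1: a node whose highest primary is 2 beats a node whose highest is 0
        System.out.println(preferNode(1, 2, 0)); // true: 2 is the next id in the ring after 1
        // assigning replica id 2: no id greater than 2 exists, so the ring wraps to the lowest id
        System.out.println(preferNode(2, 0, 1)); // true: 0 wraps around and beats 1
    }
}
-------------------------------------
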
@@ -792,10 +1006,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             long shardSize = allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
             if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
-                if (logger.isTraceEnabled()) {
-                    logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
+                logger.debug("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
                     minNode.getNodeId());
-                }
                 /* now allocate on the cluster */
                 minNode.addShard(routingNodes.relocateShard(candidate, minNode.getNodeId(), shardSize, allocation.changes()).v1());
                 return true;
@@ -1010,4 +1222,288 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             return weights[weights.length - 1] - weights[0];
         }
     }
+
+    /**
+     * Represents a decision to relocate a started shard from its current node.
+     */
+    public abstract static class RelocationDecision {
+        @Nullable
+        private final Type finalDecision;
+        @Nullable
+        private final String finalExplanation;
+        @Nullable
+        private final String assignedNodeId;
+
+        protected RelocationDecision(Type finalDecision, String finalExplanation, String assignedNodeId) {
+            this.finalDecision = finalDecision;
+            this.finalExplanation = finalExplanation;
+            this.assignedNodeId = assignedNodeId;
+        }
+
+        /**
+         * Returns {@code true} if a decision was taken by the allocator, {@code false} otherwise.
+         * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
+         */
+        public boolean isDecisionTaken() {
+            return finalDecision != null;
+        }
+
+        /**
+         * Returns the final decision made by the allocator on whether to assign the shard, and
+         * {@code null} if no decision was taken.
+         */
+        public Type getFinalDecisionType() {
+            return finalDecision;
+        }
+
+        /**
+         * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecisionType()}.
+         */
+        @Nullable
+        public String getFinalExplanation() {
+            return finalExplanation;
+        }
+
+        /**
+         * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecisionType()} returns
+         * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
+         */
+        @Nullable
+        public String getAssignedNodeId() {
+            return assignedNodeId;
+        }
+    }
+
+    /**
+     * Represents a decision to move a started shard because it is no longer allowed to remain on its current node.
+     */
+    public static final class MoveDecision extends RelocationDecision {
+        /** a constant representing no decision taken */
+        public static final MoveDecision NOT_TAKEN = new MoveDecision(null, null, null, null, null);
+        /** cached decisions so we don't have to recreate objects for common decisions when not in explain mode. */
+        private static final MoveDecision CACHED_STAY_DECISION = new MoveDecision(Decision.YES, Type.NO, null, null, null);
+        private static final MoveDecision CACHED_CANNOT_MOVE_DECISION = new MoveDecision(Decision.NO, Type.NO, null, null, null);
+
+        @Nullable
+        private final Decision canRemainDecision;
+        @Nullable
+        private final Map<String, WeightedDecision> nodeDecisions;
+
+        private MoveDecision(Decision canRemainDecision, Type finalDecision, String finalExplanation,
+                             String assignedNodeId, Map<String, WeightedDecision> nodeDecisions) {
+            super(finalDecision, finalExplanation, assignedNodeId);
+            this.canRemainDecision = canRemainDecision;
+            this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
+        }
+
+        /**
+         * Creates a move decision for the shard being able to remain on its current node, so not moving.
+         */
+        public static MoveDecision stay(Decision canRemainDecision, boolean explain) {
+            assert canRemainDecision.type() != Type.NO;
+            if (explain) {
+                final String explanation = "shard is allowed to remain on its current node, so no reason to move";
+                return new MoveDecision(Objects.requireNonNull(canRemainDecision), Type.NO, explanation, null, null);
+            } else {
+                return CACHED_STAY_DECISION;
+            }
+        }
+
+        /**
+         * Creates a move decision for the shard not being able to remain on its current node.
+         *
+         * @param canRemainDecision the decision for whether the shard is allowed to remain on its current node
+         * @param finalDecision the decision of whether to move the shard to another node
+         * @param explain true if in explain mode
+         * @param currentNodeId the current node id where the shard is assigned
+         * @param assignedNodeId the node id for where the shard can move to
+         * @param nodeDecisions the node-level decisions that comprised the final decision, non-null iff explain is true
+         * @return the {@link MoveDecision} for moving the shard to another node
+         */
+        public static MoveDecision decision(Decision canRemainDecision, Type finalDecision, boolean explain, String currentNodeId,
+                                            String assignedNodeId, Map<String, WeightedDecision> nodeDecisions) {
+            assert canRemainDecision != null;
+            assert canRemainDecision.type() != Type.YES : "create decision with MoveDecision#stay instead";
+            String finalExplanation = null;
+            if (explain) {
+                assert currentNodeId != null;
+                if (finalDecision == Type.YES) {
+                    assert assignedNodeId != null;
+                    finalExplanation = "shard cannot remain on node [" + currentNodeId + "], moving to node [" + assignedNodeId + "]";
+                } else if (finalDecision == Type.THROTTLE) {
+                    finalExplanation = "shard cannot remain on node [" + currentNodeId + "], throttled on moving to another node";
+                } else {
+                    finalExplanation = "shard cannot remain on node [" + currentNodeId + "], but cannot be assigned to any other node";
+                }
+            }
+            if (finalExplanation == null && finalDecision == Type.NO) {
+                // the final decision is NO (no node to move the shard to) and we are not in explain mode, return a cached version
+                return CACHED_CANNOT_MOVE_DECISION;
+            } else {
+                assert ((assignedNodeId == null) == (finalDecision != Type.YES));
+                return new MoveDecision(canRemainDecision, finalDecision, finalExplanation, assignedNodeId, nodeDecisions);
+            }
+        }
+
+        /**
+         * Returns {@code true} if the shard cannot remain on its current node and can be moved, returns {@code false} otherwise.
+         */
+        public boolean move() {
+            return cannotRemain() && getFinalDecisionType() == Type.YES;
+        }
+
+        /**
+         * Returns {@code true} if the shard cannot remain on its current node.
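
A hypothetical consumer of the MoveDecision API might interrogate it in this order; a sketch only (the
`describe` helper is not part of this change, and `getFinalExplanation()` is non-null only in explain mode):

-------------------------------------
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.MoveDecision;

class MoveDecisionExample {
    // summarizes a MoveDecision in the order its accessors are meant to be consulted
    static String describe(MoveDecision decision) {
        if (decision.isDecisionTaken() == false) {
            return "not taken; all other accessors return null";
        } else if (decision.move()) {
            // cannotRemain() was true and the final decision is YES, so a target node exists
            return "move to node [" + decision.getAssignedNodeId() + "]";
        } else if (decision.cannotRemain()) {
            // cannot remain, but the move decision was NO or THROTTLE, so no node is assigned
            return "cannot remain, final decision [" + decision.getFinalDecisionType() + "]";
        } else {
            return "shard stays; explanation (explain mode only): " + decision.getFinalExplanation();
        }
    }
}
-------------------------------------
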
+ */ + public boolean cannotRemain() { + return isDecisionTaken() && canRemainDecision.type() == Type.NO; + } + + /** + * Gets the individual node-level decisions that went into making the final decision as represented by + * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link WeightedDecision}. + */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } + } + + /** + * Represents a decision to move a started shard to form a more optimally balanced cluster. + */ + public static final class RebalanceDecision extends RelocationDecision { + /** a constant representing no decision taken */ + public static final RebalanceDecision NOT_TAKEN = new RebalanceDecision(null, null, null, null, null, Float.POSITIVE_INFINITY); + + @Nullable + private final Decision canRebalanceDecision; + @Nullable + private final Map nodeDecisions; + private float currentWeight; + + protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation) { + this(canRebalanceDecision, finalDecision, finalExplanation, null, null, Float.POSITIVE_INFINITY); + } + + protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation, + String assignedNodeId, Map nodeDecisions, float currentWeight) { + super(finalDecision, finalExplanation, assignedNodeId); + this.canRebalanceDecision = canRebalanceDecision; + this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null; + this.currentWeight = currentWeight; + } + + /** + * Creates a new {@link RebalanceDecision}, computing the explanation based on the decision parameters. + */ + public static RebalanceDecision decision(Decision canRebalanceDecision, Type finalDecision, String assignedNodeId, + Map nodeDecisions, float currentWeight, float threshold) { + final String explanation = produceFinalExplanation(finalDecision, assignedNodeId, threshold); + return new RebalanceDecision(canRebalanceDecision, finalDecision, explanation, assignedNodeId, nodeDecisions, currentWeight); + } + + /** + * Returns the decision for being allowed to rebalance the shard. + */ + @Nullable + public Decision getCanRebalanceDecision() { + return canRebalanceDecision; + } + + /** + * Gets the individual node-level decisions that went into making the final decision as represented by + * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link NodeRebalanceDecision}. + */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } + + private static String produceFinalExplanation(final Type finalDecisionType, final String assignedNodeId, final float threshold) { + final String finalExplanation; + if (assignedNodeId != null) { + if (finalDecisionType == Type.THROTTLE) { + finalExplanation = "throttle moving shard to node [" + assignedNodeId + "], as it is " + + "currently busy with other shard relocations"; + } else { + finalExplanation = "moving shard to node [" + assignedNodeId + "] to form a more balanced cluster"; + } + } else { + finalExplanation = "cannot rebalance shard, no other node exists that would form a more balanced " + + "cluster within the defined threshold [" + threshold + "]"; + } + return finalExplanation; + } + } + + /** + * A node-level explanation for the decision to rebalance a shard. 
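
In explain mode, the per-node breakdown attached to a RebalanceDecision can be walked using the
NodeRebalanceDecision class defined just below. A sketch under the assumption that the map is keyed by node id,
as the javadoc above states; the `printNodeBreakdown` helper is illustrative only:

-------------------------------------
import java.util.Map;

import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.NodeRebalanceDecision;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.RebalanceDecision;

class RebalanceExplainExample {
    static void printNodeBreakdown(RebalanceDecision decision) {
        Map<String, NodeRebalanceDecision> nodeDecisions = decision.getNodeDecisions();
        if (nodeDecisions == null) {
            return; // only populated in explain mode
        }
        for (Map.Entry<String, NodeRebalanceDecision> entry : nodeDecisions.entrySet()) {
            NodeRebalanceDecision node = entry.getValue();
            // a node is a viable rebalance target only when it can allocate the shard,
            // offers a better weight, and the weight delta is above the threshold
            System.out.printf("node [%s]: canAllocate=%s betterWeight=%s deltaAboveThreshold=%s (%.2f -> %.2f)%n",
                entry.getKey(),
                node.getNodeDecisionType(),
                node.isBetterWeightThanCurrent(),
                node.isDeltaAboveThreshold(),
                node.getCurrentWeight(),
                node.getWeightWithShardAdded());
        }
    }
}
-------------------------------------
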
+ */ + public static final class NodeRebalanceDecision { + private final Type nodeDecisionType; + private final Decision canAllocate; + private final boolean betterWeightThanCurrent; + private final boolean deltaAboveThreshold; + private final float currentWeight; + private final float weightWithShardAdded; + + NodeRebalanceDecision(Type nodeDecisionType, Decision canAllocate, boolean betterWeightThanCurrent, + boolean deltaAboveThreshold, float currentWeight, float weightWithShardAdded) { + this.nodeDecisionType = Objects.requireNonNull(nodeDecisionType); + this.canAllocate = Objects.requireNonNull(canAllocate); + this.betterWeightThanCurrent = betterWeightThanCurrent; + this.deltaAboveThreshold = deltaAboveThreshold; + this.currentWeight = currentWeight; + this.weightWithShardAdded = weightWithShardAdded; + } + + /** + * Returns the decision to rebalance to the node. + */ + public Type getNodeDecisionType() { + return nodeDecisionType; + } + + /** + * Returns whether the shard is allowed to be allocated to the node. + */ + public Decision getCanAllocateDecision() { + return canAllocate; + } + + /** + * Returns whether the weight of the node is better than the weight of the node where the shard currently resides. + */ + public boolean isBetterWeightThanCurrent() { + return betterWeightThanCurrent; + } + + /** + * Returns if the weight delta by assigning to this node was above the threshold to warrant a rebalance. + */ + public boolean isDeltaAboveThreshold() { + return deltaAboveThreshold; + } + + /** + * Returns the current weight of the node if the shard is not added to the node. + */ + public float getCurrentWeight() { + return currentWeight; + } + + /** + * Returns the weight of the node if the shard is added to the node. + */ + public float getWeightWithShardAdded() { + return weightWithShardAdded; + } + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index 3792f536f2f..11db0980f47 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -138,6 +138,18 @@ public abstract class Decision implements ToXContent { throw new IllegalArgumentException("Invalid Type [" + type + "]"); } } + + public boolean higherThan(Type other) { + if (this == NO) { + return false; + } else if (other == NO) { + return true; + } else if (other == THROTTLE && this == YES) { + return true; + } + return false; + } + } /** @@ -210,7 +222,7 @@ public abstract class Decision implements ToXContent { } /** - * Returns the explanation string, fully formatted. Only formats the string once + * Returns the explanation string, fully formatted. Only formats the string once. 
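
The new `higherThan` helper imposes a strict YES > THROTTLE > NO ordering on decision types. A minimal check
(run with `-ea` so the asserts fire):

-------------------------------------
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;

class HigherThanExample {
    public static void main(String[] args) {
        assert Type.YES.higherThan(Type.THROTTLE);          // YES outranks THROTTLE
        assert Type.THROTTLE.higherThan(Type.NO);           // THROTTLE outranks NO
        assert Type.YES.higherThan(Type.YES) == false;      // the ordering is strict, not reflexive
        assert Type.NO.higherThan(Type.THROTTLE) == false;  // NO never outranks anything
    }
}
-------------------------------------
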
*/ @Nullable public String getExplanation() { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 64bf5942142..1a38e3742fc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -103,28 +103,33 @@ public class EnableAllocationDecider extends AllocationDecider { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final Allocation enable; + final boolean usedIndexSetting; if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) { enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings()); + usedIndexSetting = true; } else { enable = this.enableAllocation; + usedIndexSetting = false; } switch (enable) { case ALL: return allocation.decision(Decision.YES, NAME, "all allocations are allowed"); case NONE: - return allocation.decision(Decision.NO, NAME, "no allocations are allowed"); + return allocation.decision(Decision.NO, NAME, "no allocations are allowed due to {}", setting(enable, usedIndexSetting)); case NEW_PRIMARIES: if (shardRouting.primary() && shardRouting.active() == false && shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) { return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed"); } else { - return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden"); + return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden due to {}", + setting(enable, usedIndexSetting)); } case PRIMARIES: if (shardRouting.primary()) { return allocation.decision(Decision.YES, NAME, "primary allocations are allowed"); } else { - return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden"); + return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden due to {}", + setting(enable, usedIndexSetting)); } default: throw new IllegalStateException("Unknown allocation option"); @@ -139,33 +144,60 @@ public class EnableAllocationDecider extends AllocationDecider { Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings(); final Rebalance enable; + final boolean usedIndexSetting; if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) { enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings); + usedIndexSetting = true; } else { enable = this.enableRebalance; + usedIndexSetting = false; } switch (enable) { case ALL: return allocation.decision(Decision.YES, NAME, "all rebalancing is allowed"); case NONE: - return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed"); + return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to {}", setting(enable, usedIndexSetting)); case PRIMARIES: if (shardRouting.primary()) { return allocation.decision(Decision.YES, NAME, "primary rebalancing is allowed"); } else { - return allocation.decision(Decision.NO, NAME, "replica rebalancing is forbidden"); + return allocation.decision(Decision.NO, NAME, "replica rebalancing is forbidden due to {}", + setting(enable, usedIndexSetting)); } case REPLICAS: if (shardRouting.primary() == false) { return allocation.decision(Decision.YES, NAME, "replica rebalancing is allowed"); } 
else { - return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden"); + return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden due to {}", + setting(enable, usedIndexSetting)); } default: throw new IllegalStateException("Unknown rebalance option"); } } + private static String setting(Allocation allocation, boolean usedIndexSetting) { + StringBuilder buf = new StringBuilder("["); + if (usedIndexSetting) { + buf.append(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()); + } else { + buf.append(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()); + } + buf.append("=").append(allocation.toString().toLowerCase(Locale.ROOT)).append("]"); + return buf.toString(); + } + + private static String setting(Rebalance rebalance, boolean usedIndexSetting) { + StringBuilder buf = new StringBuilder("["); + if (usedIndexSetting) { + buf.append(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey()); + } else { + buf.append(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey()); + } + buf.append("=").append(rebalance.toString().toLowerCase(Locale.ROOT)).append("]"); + return buf.toString(); + } + /** * Allocation values or rather their string representation to be used used with * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index 395d3472329..0e7159c857b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -54,7 +54,8 @@ public class MaxRetryAllocationDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { - UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + final Decision decision; if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings()); @@ -62,16 +63,21 @@ public class MaxRetryAllocationDecider extends AllocationDecider { // if we are called via the _reroute API we ignore the failure counter and try to allocate // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is // enough to manually retry. - return allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" + decision = allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed " + unassignedInfo.toString() + " - retrying once on manual allocation"); } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) { - return allocation.decision(Decision.NO, NAME, "shard has already failed allocating [" + decision = allocation.decision(Decision.NO, NAME, "shard has already failed allocating [" + unassignedInfo.getNumFailedAllocations() + "] times vs. 
[" + maxRetry + "] retries allowed " + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry"); + } else { + decision = allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" + + unassignedInfo.getNumFailedAllocations() + "] times but [" + maxRetry + "] retries are allowed"); } + } else { + decision = allocation.decision(Decision.YES, NAME, "shard has no previous failures"); } - return allocation.decision(Decision.YES, NAME, "shard has no previous failures"); + return decision; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index bd9bf35a68e..3c20f1ec062 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -23,9 +23,6 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -36,22 +33,6 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { public static final String NAME = "snapshot_in_progress"; - /** - * Disables relocation of shards that are currently being snapshotted. 
- */ - public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = - Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, - Property.Dynamic, Property.NodeScope); - - private volatile boolean enableRelocation = false; - - /** - * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance - */ - public SnapshotInProgressAllocationDecider() { - this(Settings.Builder.EMPTY_SETTINGS); - } - /** * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance from * given settings @@ -59,18 +40,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { * @param settings {@link org.elasticsearch.common.settings.Settings} to use */ public SnapshotInProgressAllocationDecider(Settings settings) { - this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - } - - public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, - this::setEnableRelocation); - } - - private void setEnableRelocation(boolean enableRelocation) { - this.enableRelocation = enableRelocation; } /** @@ -93,7 +63,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { } private Decision canMove(ShardRouting shardRouting, RoutingAllocation allocation) { - if (!enableRelocation && shardRouting.primary()) { + if (shardRouting.primary()) { // Only primary shards are snapshotted SnapshotsInProgress snapshotsInProgress = allocation.custom(SnapshotsInProgress.TYPE); diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index b3ed5aa99cc..af064bd42d0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -81,6 +81,7 @@ import java.util.concurrent.Future; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -122,7 +123,7 @@ public class ClusterService extends AbstractLifecycleComponent { private final Queue onGoingTimeouts = ConcurrentCollections.newQueue(); - private volatile ClusterState clusterState; + private final AtomicReference state = new AtomicReference<>(); private final ClusterBlocks.Builder initialBlocks; @@ -136,7 +137,7 @@ public class ClusterService extends AbstractLifecycleComponent { this.clusterSettings = clusterSettings; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); // will be replaced on doStart. 
- this.clusterState = ClusterState.builder(clusterName).build(); + this.state.set(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN)); this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold); @@ -157,9 +158,12 @@ public class ClusterService extends AbstractLifecycleComponent { } public synchronized void setLocalNode(DiscoveryNode localNode) { - assert clusterState.nodes().getLocalNodeId() == null : "local node is already set"; - DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()); - this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + assert state.get().getClusterState().nodes().getLocalNodeId() == null : "local node is already set"; + this.state.getAndUpdate(css -> { + ClusterState clusterState = css.getClusterState(); + DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build(); + return new ClusterServiceState(ClusterState.builder(clusterState).nodes(nodes).build(), css.getClusterStateStatus()); + }); } public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { @@ -197,13 +201,14 @@ public class ClusterService extends AbstractLifecycleComponent { @Override protected synchronized void doStart() { Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); - Objects.requireNonNull(clusterState.nodes().getLocalNode(), "please set the local node before starting"); + Objects.requireNonNull(state.get().getClusterState().nodes().getLocalNode(), "please set the local node before starting"); Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting"); add(localNodeMasterListeners); - this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build(); + this.state.getAndUpdate(css -> new ClusterServiceState( + ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(), + css.getClusterStateStatus())); this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext()); - this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build(); } @Override @@ -235,7 +240,7 @@ public class ClusterService extends AbstractLifecycleComponent { * The local node. */ public DiscoveryNode localNode() { - DiscoveryNode localNode = clusterState.getNodes().getLocalNode(); + DiscoveryNode localNode = state.get().getClusterState().getNodes().getLocalNode(); if (localNode == null) { throw new IllegalStateException("No local node found. Is the node started?"); } @@ -247,10 +252,17 @@ public class ClusterService extends AbstractLifecycleComponent { } /** - * The current state. + * The current cluster state. */ public ClusterState state() { - return this.clusterState; + return this.state.get().getClusterState(); + } + + /** + * The current cluster service state comprising cluster state and cluster state status. + */ + public ClusterServiceState clusterServiceState() { + return this.state.get(); } /** @@ -308,7 +320,7 @@ public class ClusterService extends AbstractLifecycleComponent { /** * Adds a cluster state listener that will timeout after the provided timeout, * and is executed after the clusterstate has been successfully applied ie. 
is - * in state {@link org.elasticsearch.cluster.ClusterState.ClusterStateStatus#APPLIED} + * in state {@link ClusterStateStatus#APPLIED} * NOTE: a {@code null} timeout means that the listener will never be removed * automatically */ @@ -542,7 +554,7 @@ public class ClusterService extends AbstractLifecycleComponent { return; } logger.debug("processing [{}]: execute", tasksSummary); - ClusterState previousClusterState = clusterState; + ClusterState previousClusterState = state.get().getClusterState(); if (!previousClusterState.nodes().isLocalNodeElectedMaster() && executor.runOnlyOnMaster()) { logger.debug("failing [{}]: local node is no longer master", tasksSummary); toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source)); @@ -562,9 +574,9 @@ public class ClusterService extends AbstractLifecycleComponent { executionTime, previousClusterState.version(), tasksSummary, - previousClusterState.nodes().prettyPrint(), - previousClusterState.routingTable().prettyPrint(), - previousClusterState.getRoutingNodes().prettyPrint()), + previousClusterState.nodes(), + previousClusterState.routingTable(), + previousClusterState.getRoutingNodes()), e); } warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); @@ -653,10 +665,8 @@ public class ClusterService extends AbstractLifecycleComponent { } final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners); - newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED); - if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState.prettyPrint()); + logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState); } else if (logger.isDebugEnabled()) { logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), tasksSummary); } @@ -671,7 +681,7 @@ public class ClusterService extends AbstractLifecycleComponent { } } - nodeConnectionsService.connectToAddedNodes(clusterChangedEvent); + nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes()); // if we are the master, publish the new state to all nodes // we publish here before we send a notification to all the listeners, since if it fails @@ -686,13 +696,15 @@ public class ClusterService extends AbstractLifecycleComponent { (Supplier) () -> new ParameterizedMessage( "failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version), t); + // ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state + nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes()); proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t)); return; } } // update the current cluster state - clusterState = newClusterState; + state.set(new ClusterServiceState(newClusterState, ClusterStateStatus.BEING_APPLIED)); logger.debug("set local cluster state to version {}", newClusterState.version()); try { // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency @@ -712,9 +724,9 @@ public class ClusterService extends AbstractLifecycleComponent { } } - nodeConnectionsService.disconnectFromRemovedNodes(clusterChangedEvent); + nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes()); - newClusterState.status(ClusterState.ClusterStateStatus.APPLIED); + state.getAndUpdate(css -> new ClusterServiceState(css.getClusterState(), 
ClusterStateStatus.APPLIED)); for (ClusterStateListener listener : postAppliedListeners) { try { @@ -759,7 +771,7 @@ public class ClusterService extends AbstractLifecycleComponent { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); final long version = newClusterState.version(); final String stateUUID = newClusterState.stateUUID(); - final String prettyPrint = newClusterState.prettyPrint(); + final String fullState = newClusterState.toString(); logger.warn( (Supplier) () -> new ParameterizedMessage( "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", @@ -767,7 +779,7 @@ public class ClusterService extends AbstractLifecycleComponent { version, stateUUID, tasksSummary, - prettyPrint), + fullState), e); // TODO: do we want to call updateTask.onFailure here? } @@ -826,9 +838,7 @@ public class ClusterService extends AbstractLifecycleComponent { (Supplier) () -> new ParameterizedMessage( "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" + "{}\nnew cluster state:\n{}", - source, - oldState.prettyPrint(), - newState.prettyPrint()), + source, oldState, newState), e); } } @@ -893,7 +903,11 @@ public class ClusterService extends AbstractLifecycleComponent { @Override public void run() { - runTasksForExecutor(executor); + // if this task is already processed, the executor shouldn't execute other tasks (that arrived later), + // to give other executors a chance to execute their tasks. + if (processed.get() == false) { + runTasksForExecutor(executor); + } } public String toString(ClusterStateTaskExecutor executor) { diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java new file mode 100644 index 00000000000..3002941b482 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.service; + +import org.elasticsearch.cluster.ClusterState; + +/** + * A simple immutable container class that comprises a cluster state and cluster state status. Used by {@link ClusterService} + * to provide a snapshot view on which cluster state is currently being applied / already applied. 
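
A consumer-side sketch of the new atomic snapshot; the `waitUntilApplied` helper and its polling loop are
illustrative only (real code would register a cluster state listener instead of busy-waiting):

-------------------------------------
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.cluster.service.ClusterStateStatus;

class ClusterServiceStateExample {
    // reads the cluster state and its status in one atomic snapshot, avoiding the torn read
    // that separate state and status fields would allow
    static ClusterState waitUntilApplied(ClusterService clusterService) throws InterruptedException {
        while (true) {
            ClusterServiceState snapshot = clusterService.clusterServiceState();
            if (snapshot.getClusterStateStatus() == ClusterStateStatus.APPLIED) {
                return snapshot.getClusterState();
            }
            Thread.sleep(10); // illustration only
        }
    }
}
-------------------------------------
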
+ */ +public class ClusterServiceState { + private final ClusterState clusterState; + private final ClusterStateStatus clusterStateStatus; + + public ClusterServiceState(ClusterState clusterState, ClusterStateStatus clusterStateStatus) { + this.clusterState = clusterState; + this.clusterStateStatus = clusterStateStatus; + } + + public ClusterState getClusterState() { + return clusterState; + } + + public ClusterStateStatus getClusterStateStatus() { + return clusterStateStatus; + } + + @Override + public String toString() { + return "version [" + clusterState.version() + "], status [" + clusterStateStatus + "]"; + } +} diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/Ec2Module.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterStateStatus.java similarity index 74% rename from plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/Ec2Module.java rename to core/src/main/java/org/elasticsearch/cluster/service/ClusterStateStatus.java index b7cc7a09386..419b307be68 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/Ec2Module.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterStateStatus.java @@ -17,15 +17,10 @@ * under the License. */ -package org.elasticsearch.cloud.aws; - -import org.elasticsearch.common.inject.AbstractModule; - -public class Ec2Module extends AbstractModule { - - @Override - protected void configure() { - bind(AwsEc2Service.class).to(AwsEc2ServiceImpl.class).asEagerSingleton(); - } +package org.elasticsearch.cluster.service; +public enum ClusterStateStatus { + UNKNOWN, + BEING_APPLIED, + APPLIED; } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java b/core/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java index d53dcb32584..93da0b99e9a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/PendingClusterTask.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; -/** - */ public class PendingClusterTask implements Streamable { private long insertOrder; diff --git a/core/src/main/java/org/elasticsearch/common/Booleans.java b/core/src/main/java/org/elasticsearch/common/Booleans.java index 9c5f5746633..9ec1ac968ac 100644 --- a/core/src/main/java/org/elasticsearch/common/Booleans.java +++ b/core/src/main/java/org/elasticsearch/common/Booleans.java @@ -19,9 +19,6 @@ package org.elasticsearch.common; -/** - * - */ public class Booleans { /** diff --git a/core/src/main/java/org/elasticsearch/common/Classes.java b/core/src/main/java/org/elasticsearch/common/Classes.java index 4a73c0d8ae4..091e3465028 100644 --- a/core/src/main/java/org/elasticsearch/common/Classes.java +++ b/core/src/main/java/org/elasticsearch/common/Classes.java @@ -21,9 +21,6 @@ package org.elasticsearch.common; import java.lang.reflect.Modifier; -/** - * - */ public class Classes { /** diff --git a/core/src/main/java/org/elasticsearch/common/Strings.java b/core/src/main/java/org/elasticsearch/common/Strings.java index 955b836ca1c..1ef13e3bc70 100644 --- a/core/src/main/java/org/elasticsearch/common/Strings.java +++ b/core/src/main/java/org/elasticsearch/common/Strings.java @@ -45,9 +45,6 @@ import java.util.TreeSet; import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.common.util.set.Sets.newHashSet; -/** - * - */ public class Strings { public static final String[] EMPTY_ARRAY = new 
String[0]; diff --git a/core/src/main/java/org/elasticsearch/common/Table.java b/core/src/main/java/org/elasticsearch/common/Table.java index ab0252b11dc..430070ee19c 100644 --- a/core/src/main/java/org/elasticsearch/common/Table.java +++ b/core/src/main/java/org/elasticsearch/common/Table.java @@ -30,8 +30,6 @@ import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; -/** - */ public class Table { private List headers = new ArrayList<>(); @@ -197,6 +195,22 @@ public class Table { return null; } + public Map getAliasMap() { + Map headerAliasMap = new HashMap<>(); + for (int i = 0; i < headers.size(); i++) { + Cell headerCell = headers.get(i); + String headerName = headerCell.value.toString(); + if (headerCell.attr.containsKey("alias")) { + String[] aliases = Strings.splitStringByCommaToArray(headerCell.attr.get("alias")); + for (String alias : aliases) { + headerAliasMap.put(alias, headerName); + } + } + headerAliasMap.put(headerName, headerName); + } + return headerAliasMap; + } + public static class Cell { public final Object value; public final Map attr; diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java index 4d982b3c3e3..700bff5ebae 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobStoreException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class BlobStoreException extends ElasticsearchException { public BlobStoreException(String msg) { diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index 725535ecadb..ce696678896 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -33,9 +33,6 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -/** - * - */ public class FsBlobStore extends AbstractComponent implements BlobStore { private final Path path; diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java b/core/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java index ea931631e0c..e51c56b6c11 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/support/PlainBlobMetaData.java @@ -21,9 +21,6 @@ package org.elasticsearch.common.blobstore.support; import org.elasticsearch.common.blobstore.BlobMetaData; -/** - * - */ public class PlainBlobMetaData implements BlobMetaData { private final String name; diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 43c1df588b1..9b78c2fe5a7 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -20,12 +20,6 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; public final class BytesArray extends 
BytesReference { diff --git a/core/src/main/java/org/elasticsearch/common/cache/Cache.java b/core/src/main/java/org/elasticsearch/common/cache/Cache.java index a42d01ccf72..cf8b58d0271 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/core/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -67,13 +67,13 @@ import java.util.function.ToLongBiFunction; */ public class Cache { // positive if entries have an expiration - private long expireAfterAccess = -1; + private long expireAfterAccessNanos = -1; // true if entries can expire after access private boolean entriesExpireAfterAccess; // positive if entries have an expiration after write - private long expireAfterWrite = -1; + private long expireAfterWriteNanos = -1; // true if entries can expire after initial insertion private boolean entriesExpireAfterWrite; @@ -98,22 +98,32 @@ public class Cache { Cache() { } - void setExpireAfterAccess(long expireAfterAccess) { - if (expireAfterAccess <= 0) { - throw new IllegalArgumentException("expireAfterAccess <= 0"); + void setExpireAfterAccessNanos(long expireAfterAccessNanos) { + if (expireAfterAccessNanos <= 0) { + throw new IllegalArgumentException("expireAfterAccessNanos <= 0"); } - this.expireAfterAccess = expireAfterAccess; + this.expireAfterAccessNanos = expireAfterAccessNanos; this.entriesExpireAfterAccess = true; } - void setExpireAfterWrite(long expireAfterWrite) { - if (expireAfterWrite <= 0) { - throw new IllegalArgumentException("expireAfterWrite <= 0"); + // pkg-private for testing + long getExpireAfterAccessNanos() { + return this.expireAfterAccessNanos; + } + + void setExpireAfterWriteNanos(long expireAfterWriteNanos) { + if (expireAfterWriteNanos <= 0) { + throw new IllegalArgumentException("expireAfterWriteNanos <= 0"); } - this.expireAfterWrite = expireAfterWrite; + this.expireAfterWriteNanos = expireAfterWriteNanos; this.entriesExpireAfterWrite = true; } + // pkg-private for testing + long getExpireAfterWriteNanos() { + return this.expireAfterWriteNanos; + } + void setMaximumWeight(long maximumWeight) { if (maximumWeight < 0) { throw new IllegalArgumentException("maximumWeight < 0"); @@ -696,8 +706,8 @@ public class Cache { } private boolean isExpired(Entry entry, long now) { - return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccess) || - (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWrite); + return (entriesExpireAfterAccess && now - entry.accessTime > expireAfterAccessNanos) || + (entriesExpireAfterWrite && now - entry.writeTime > expireAfterWriteNanos); } private boolean unlink(Entry entry) { diff --git a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java index ffb0e591180..67c8d508ba5 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java @@ -19,13 +19,15 @@ package org.elasticsearch.common.cache; +import org.elasticsearch.common.unit.TimeValue; + import java.util.Objects; import java.util.function.ToLongBiFunction; public class CacheBuilder { private long maximumWeight = -1; - private long expireAfterAccess = -1; - private long expireAfterWrite = -1; + private long expireAfterAccessNanos = -1; + private long expireAfterWriteNanos = -1; private ToLongBiFunction weigher; private RemovalListener removalListener; @@ -44,19 +46,35 @@ public class CacheBuilder { return this; } - public CacheBuilder setExpireAfterAccess(long 
expireAfterAccess) { - if (expireAfterAccess <= 0) { + /** + * Sets the amount of time before an entry in the cache expires after it was last accessed. + * + * @param expireAfterAccess The amount of time before an entry expires after it was last accessed. Must not be {@code null} and must + * be greater than 0. + */ + public CacheBuilder setExpireAfterAccess(TimeValue expireAfterAccess) { + Objects.requireNonNull(expireAfterAccess); + final long expireAfterAccessNanos = expireAfterAccess.getNanos(); + if (expireAfterAccessNanos <= 0) { throw new IllegalArgumentException("expireAfterAccess <= 0"); } - this.expireAfterAccess = expireAfterAccess; + this.expireAfterAccessNanos = expireAfterAccessNanos; return this; } - public CacheBuilder setExpireAfterWrite(long expireAfterWrite) { - if (expireAfterWrite <= 0) { + /** + * Sets the amount of time before an entry in the cache expires after it was written. + * + * @param expireAfterWrite The amount of time before an entry expires after it was written. Must not be {@code null} and must be + * greater than 0. + */ + public CacheBuilder setExpireAfterWrite(TimeValue expireAfterWrite) { + Objects.requireNonNull(expireAfterWrite); + final long expireAfterWriteNanos = expireAfterWrite.getNanos(); + if (expireAfterWriteNanos <= 0) { throw new IllegalArgumentException("expireAfterWrite <= 0"); } - this.expireAfterWrite = expireAfterWrite; + this.expireAfterWriteNanos = expireAfterWriteNanos; return this; } @@ -77,11 +95,11 @@ public class CacheBuilder { if (maximumWeight != -1) { cache.setMaximumWeight(maximumWeight); } - if (expireAfterAccess != -1) { - cache.setExpireAfterAccess(expireAfterAccess); + if (expireAfterAccessNanos != -1) { + cache.setExpireAfterAccessNanos(expireAfterAccessNanos); } - if (expireAfterWrite != -1) { - cache.setExpireAfterWrite(expireAfterWrite); + if (expireAfterWriteNanos != -1) { + cache.setExpireAfterWriteNanos(expireAfterWriteNanos); } if (weigher != null) { cache.setWeigher(weigher); diff --git a/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java b/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java index 55fc67831e3..c48626cf7c1 100644 --- a/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java +++ b/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java @@ -26,8 +26,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import java.util.Iterator; -/** - */ public final class HppcMaps { private HppcMaps() { diff --git a/core/src/main/java/org/elasticsearch/common/collect/MapBuilder.java b/core/src/main/java/org/elasticsearch/common/collect/MapBuilder.java index bfb0f42f44a..a840ee037f8 100644 --- a/core/src/main/java/org/elasticsearch/common/collect/MapBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/collect/MapBuilder.java @@ -24,9 +24,6 @@ import java.util.Map; import static java.util.Collections.unmodifiableMap; -/** - * - */ public class MapBuilder { public static MapBuilder newMapBuilder() { diff --git a/core/src/main/java/org/elasticsearch/common/collect/Tuple.java b/core/src/main/java/org/elasticsearch/common/collect/Tuple.java index ed88ae9df25..2a0d860e1a3 100644 --- a/core/src/main/java/org/elasticsearch/common/collect/Tuple.java +++ b/core/src/main/java/org/elasticsearch/common/collect/Tuple.java @@ -19,9 +19,6 @@ package org.elasticsearch.common.collect; -/** - * - */ public class Tuple { public static Tuple tuple(V1 v1, V2 v2) { diff --git a/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java 
b/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java index 6f1534b57d8..2ed43ccaa24 100644 --- a/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java +++ b/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java @@ -21,12 +21,10 @@ package org.elasticsearch.common.component; import org.elasticsearch.common.settings.Settings; +import java.io.IOException; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; -/** - * - */ public abstract class AbstractLifecycleComponent extends AbstractComponent implements LifecycleComponent { protected final Lifecycle lifecycle = new Lifecycle(); @@ -104,11 +102,17 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent imple listener.beforeClose(); } lifecycle.moveToClosed(); - doClose(); + try { + doClose(); + } catch (IOException e) { + // TODO: we need to separate out closing (ie shutting down) services, vs releasing runtime transient + // structures. Shutting down services should use IOUtils.close + logger.warn("failed to close " + getClass().getName(), e); + } for (LifecycleListener listener : listeners) { listener.afterClose(); } } - protected abstract void doClose(); + protected abstract void doClose() throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java b/core/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java index b57486105d2..2729d60fa1c 100644 --- a/core/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java +++ b/core/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java @@ -21,9 +21,6 @@ package org.elasticsearch.common.component; import org.elasticsearch.common.lease.Releasable; -/** - * - */ public interface LifecycleComponent extends Releasable { Lifecycle.State lifecycleState(); diff --git a/core/src/main/java/org/elasticsearch/common/component/LifecycleListener.java b/core/src/main/java/org/elasticsearch/common/component/LifecycleListener.java index 52f9bea410b..447fb736e0c 100644 --- a/core/src/main/java/org/elasticsearch/common/component/LifecycleListener.java +++ b/core/src/main/java/org/elasticsearch/common/component/LifecycleListener.java @@ -19,9 +19,6 @@ package org.elasticsearch.common.component; -/** - * - */ public abstract class LifecycleListener { public void beforeStart() { diff --git a/core/src/main/java/org/elasticsearch/common/compress/Compressor.java b/core/src/main/java/org/elasticsearch/common/compress/Compressor.java index 883078dafe8..05706debd37 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/Compressor.java +++ b/core/src/main/java/org/elasticsearch/common/compress/Compressor.java @@ -25,8 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - */ public interface Compressor { boolean isCompressed(BytesReference bytes); diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java index 82e049704cc..067d4666722 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java +++ b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java @@ -29,8 +29,6 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; -/** - */ public class CompressorFactory { public static final Compressor COMPRESSOR = new DeflateCompressor(); diff --git 
a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java index 15e2fb4fabb..f76720b9ed6 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoPoint.java @@ -32,9 +32,6 @@ import java.util.Arrays; import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; -/** - * - */ public final class GeoPoint { private double lat; diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index b81720057c6..d33616cbe60 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -30,8 +30,6 @@ import org.elasticsearch.index.mapper.GeoPointFieldMapper; import java.io.IOException; -/** - */ public class GeoUtils { /** Maximum valid latitude in degrees. */ diff --git a/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java b/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java index fe33fadb857..c800e011594 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java +++ b/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.geo; -/** - */ public class ShapesAvailability { public static final boolean SPATIAL4J_AVAILABLE; diff --git a/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java b/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java index e1b0356b686..5a7c9e2a325 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java +++ b/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -/** - * - */ public enum SpatialStrategy implements Writeable { TERM("term"), diff --git a/core/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java b/core/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java index 53b932afd54..f75d8d1c96a 100644 --- a/core/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java +++ b/core/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java @@ -22,9 +22,6 @@ package org.elasticsearch.common.io; import java.io.IOException; import java.io.Reader; -/** - * - */ public class FastCharArrayReader extends Reader { /** diff --git a/core/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java b/core/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java index 59c95f67ea3..0ed5fd498de 100644 --- a/core/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java +++ b/core/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java @@ -25,8 +25,6 @@ import java.io.OutputStream; import java.io.Writer; -/** - */ public final class UTF8StreamWriter extends Writer { /** diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java index d13f539a670..3193d47c47e 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java @@ -22,8 +22,6 @@ import java.io.EOFException; import java.io.IOException; import 
java.nio.ByteBuffer; -/** - */ public class ByteBufferStreamInput extends StreamInput { private final ByteBuffer buffer; diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index 3de5c757ae1..21de0c421b7 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -69,7 +69,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { @Override public void writeByte(byte b) throws IOException { - ensureCapacity(count+1); + ensureCapacity(count + 1L); bytes.set(count, b); count++; } @@ -87,7 +87,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { } // get enough pages for new size - ensureCapacity(count+length); + ensureCapacity(((long) count) + length); // bulk copy bytes.set(count, b, offset, length); @@ -113,22 +113,17 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { } @Override - public void seek(long position) throws IOException { - if (position > Integer.MAX_VALUE) { - throw new IllegalArgumentException("position " + position + " > Integer.MAX_VALUE"); - } - - count = (int)position; - ensureCapacity(count); + public void seek(long position) { + ensureCapacity(position); + count = (int) position; } public void skip(int length) { - count += length; - ensureCapacity(count); + seek(((long) count) + length); } @Override - public void close() throws IOException { + public void close() { // empty for now. } @@ -156,7 +151,10 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { return bytes.ramBytesUsed(); } - private void ensureCapacity(int offset) { + private void ensureCapacity(long offset) { + if (offset > Integer.MAX_VALUE) { + throw new IllegalArgumentException(getClass().getSimpleName() + " cannot hold more than 2GB of data"); + } bytes = bigArrays.grow(bytes, offset); } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java index aa542b1c305..184560a4a76 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/DataOutputStreamOutput.java @@ -23,9 +23,6 @@ import java.io.Closeable; import java.io.DataOutput; import java.io.IOException; -/** - * - */ public class DataOutputStreamOutput extends StreamOutput { private final DataOutput out; diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java index d786041af49..a252b66bcfc 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java @@ -25,9 +25,6 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -/** - * - */ public class InputStreamStreamInput extends StreamInput { private final InputStream is; diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java index 93e1fe8ee90..d30255605e7 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java +++ 
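
A short sketch of the BytesStreamOutput behavior introduced above: ensureCapacity now widens to long before growing the backing pages, so writes and seeks that would overflow the int count fail fast instead of wrapping around:

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;

    static void demo() throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeBytes(new byte[16], 0, 16);  // ordinary writes are unchanged
        out.seek(1024);                       // seek no longer declares IOException
        try {
            out.seek(((long) Integer.MAX_VALUE) + 1);
        } catch (IllegalArgumentException e) {
            // "BytesStreamOutput cannot hold more than 2GB of data"
        }
    }
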
b/core/src/main/java/org/elasticsearch/common/io/stream/OutputStreamStreamOutput.java @@ -22,8 +22,6 @@ package org.elasticsearch.common.io.stream; import java.io.IOException; import java.io.OutputStream; -/** - */ public class OutputStreamStreamOutput extends StreamOutput { private final OutputStream out; diff --git a/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java b/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java index 65ec7e7c2b4..ba5531c813c 100644 --- a/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java +++ b/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java @@ -25,11 +25,11 @@ import org.joda.time.MutableDateTime; import org.joda.time.format.DateTimeFormatter; import java.util.Objects; -import java.util.concurrent.Callable; +import java.util.function.LongSupplier; /** * A parser for date/time formatted text with optional date math. - * + * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. @@ -43,19 +43,19 @@ public class DateMathParser { this.dateTimeFormatter = dateTimeFormatter; } - public long parse(String text, Callable<Long> now) { + public long parse(String text, LongSupplier now) { return parse(text, now, false, null); } // Note: we take a callable here for the timestamp in order to be able to figure out // if it has been used. For instance, the request cache does not cache requests that make // use of `now`. - public long parse(String text, Callable<Long> now, boolean roundUp, DateTimeZone timeZone) { + public long parse(String text, LongSupplier now, boolean roundUp, DateTimeZone timeZone) { long time; String mathString; if (text.startsWith("now")) { try { - time = now.call(); + time = now.getAsLong(); } catch (Exception e) { throw new ElasticsearchParseException("could not read the current timestamp", e); } @@ -63,13 +63,10 @@ public class DateMathParser { } else { int index = text.indexOf("||"); if (index == -1) { - return parseDateTime(text, timeZone); + return parseDateTime(text, timeZone, roundUp); } - time = parseDateTime(text.substring(0, index), timeZone); + time = parseDateTime(text.substring(0, index), timeZone, false); mathString = text.substring(index + 2); - if (mathString.isEmpty()) { - return time; - } } return parseMath(mathString, time, roundUp, timeZone); @@ -97,7 +94,7 @@ public class DateMathParser { throw new ElasticsearchParseException("operator not supported for date math [{}]", mathString); } } - + if (i >= mathString.length()) { throw new ElasticsearchParseException("truncated date math [{}]", mathString); } @@ -190,15 +187,29 @@ public class DateMathParser { return dateTime.getMillis(); } - private long parseDateTime(String value, DateTimeZone timeZone) { + private long parseDateTime(String value, DateTimeZone timeZone, boolean roundUpIfNoTime) { DateTimeFormatter parser = dateTimeFormatter.parser(); if (timeZone != null) { parser = parser.withZone(timeZone); } try { - return parser.parseMillis(value); + MutableDateTime date; + // We use 01/01/1970 as a base date so that things keep working with date + // fields that are filled with times without dates + if (roundUpIfNoTime) { + date = new MutableDateTime(1970, 1, 1, 23, 59, 59, 999, DateTimeZone.UTC); + } else { + date = new MutableDateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC); + } + final int end = parser.parseInto(date, value, 0); + if (end < 0) { + int position = ~end; + throw new
IllegalArgumentException("Parse failure at index [" + position + "] of [" + value + "]"); + } else if (end != value.length()) { + throw new IllegalArgumentException("Unrecognized chars at the end of [" + value + "]: [" + value.substring(end) + "]"); + } + return date.getMillis(); } catch (IllegalArgumentException e) { - throw new ElasticsearchParseException("failed to parse date field [{}] with format [{}]", e, value, dateTimeFormatter.format()); } } diff --git a/core/src/main/java/org/elasticsearch/common/joda/Joda.java b/core/src/main/java/org/elasticsearch/common/joda/Joda.java index 34c882d0d80..7978ceff48c 100644 --- a/core/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/core/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -44,9 +44,6 @@ import java.io.IOException; import java.io.Writer; import java.util.Locale; -/** - * - */ public class Joda { public static FormatDateTimeFormatter forPattern(String input) { diff --git a/core/src/main/java/org/elasticsearch/common/lease/Releasables.java b/core/src/main/java/org/elasticsearch/common/lease/Releasables.java index bfabd20976d..bd7b2a6e772 100644 --- a/core/src/main/java/org/elasticsearch/common/lease/Releasables.java +++ b/core/src/main/java/org/elasticsearch/common/lease/Releasables.java @@ -24,6 +24,7 @@ import org.apache.lucene.util.IOUtils; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; /** Utility methods to work with {@link Releasable}s. */ public enum Releasables { @@ -93,4 +94,16 @@ public enum Releasables { public static Releasable wrap(final Releasable... releasables) { return () -> close(releasables); } + + /** + * Equivalent to {@link #wrap(Releasable...)} but can be called multiple times without double releasing. + */ + public static Releasable releaseOnce(final Releasable... releasables) { + final AtomicBoolean released = new AtomicBoolean(false); + return () -> { + if (released.compareAndSet(false, true)) { + close(releasables); + } + }; + } } diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index d9b811585de..ace0569a14a 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -102,7 +102,7 @@ public class DeprecationLogger { } else { name = "deprecation." 
+ name; } - this.logger = LogManager.getLogger(name, parentLogger.getMessageFactory()); + this.logger = LogManager.getLogger(name); } /** diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index 22d08202f9f..428e3ce7964 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -99,7 +99,7 @@ public class LogConfigurator { @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals("log4j2.properties")) { - configurations.add((PropertiesConfiguration) factory.getConfiguration(file.toString(), file.toUri())); + configurations.add((PropertiesConfiguration) factory.getConfiguration(context, file.toString(), file.toUri())); } return FileVisitResult.CONTINUE; } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java b/core/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java index e99f61fe8e6..b363315a108 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/BytesRefs.java @@ -22,8 +22,6 @@ package org.elasticsearch.common.lucene; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -/** - */ public class BytesRefs { /** diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 94e1f05e46b..e3d3bf1c624 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -27,6 +27,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; @@ -84,9 +85,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -/** - * - */ public class Lucene { public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; @@ -353,6 +351,8 @@ public class Lucene { return new ScoreDoc(in.readVInt(), in.readFloat()); } + private static final Class<?> GEO_DISTANCE_SORT_TYPE_CLASS = LatLonDocValuesField.newDistanceSort("some_geo_field", 0, 0).getClass(); + public static void writeTopDocs(StreamOutput out, TopDocs topDocs) throws IOException { if (topDocs instanceof TopFieldDocs) { out.writeBoolean(true); @@ -363,6 +363,16 @@ public class Lucene { out.writeVInt(topFieldDocs.fields.length); for (SortField sortField : topFieldDocs.fields) { + if (sortField.getClass() == GEO_DISTANCE_SORT_TYPE_CLASS) { + // for geo sorting, we replace the SortField with a SortField that assumes a double field.
+ // this works since the SortField is only used for merging top docs + SortField newSortField = new SortField(sortField.getField(), SortField.Type.DOUBLE); + newSortField.setMissingValue(sortField.getMissingValue()); + sortField = newSortField; + } + if (sortField.getClass() != SortField.class) { + throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]"); + } if (sortField.getField() == null) { out.writeBoolean(false); } else { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java b/core/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java index e1fc0171bb7..2552309450b 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java @@ -28,9 +28,6 @@ import org.apache.lucene.search.SimpleCollector; import java.io.IOException; -/** - * - */ public class MinimumScoreCollector extends SimpleCollector { private final Collector collector; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java b/core/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java index e5708df05f3..df17f8d7757 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java @@ -20,9 +20,6 @@ package org.elasticsearch.common.lucene; import org.apache.lucene.search.Scorer; -/** - * - */ public interface ScorerAware { void setScorer(Scorer scorer); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java index 23370bb0ffe..ffd85213e8a 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java @@ -22,9 +22,6 @@ package org.elasticsearch.common.lucene.all; import java.util.ArrayList; import java.util.List; -/** - * - */ public class AllEntries { public static class Entry { private final String name; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index 500fa206c96..269c1c55eec 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -64,6 +64,10 @@ public final class AllTermQuery extends Query { this.term = term; } + public Term getTerm() { + return term; + } + @Override public boolean equals(Object obj) { if (sameClassAs(obj) == false) { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java index 045a4badc45..0d29a65d5d5 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java @@ -28,9 +28,6 @@ import org.apache.lucene.util.SmallFloat; import java.io.IOException; -/** - * - */ public final class AllTokenStream extends TokenFilter { public static TokenStream allTokenStream(String allFieldName, String value, float boost, Analyzer analyzer) throws IOException { return new AllTokenStream(analyzer.tokenStream(allFieldName, value), boost); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java 
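
The releaseOnce helper added to Releasables earlier in this patch makes a wrapped Releasable idempotent; since Releasable exposes a single abstract close() method, a method reference is enough to demonstrate it:

    import java.util.concurrent.atomic.AtomicInteger;
    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.lease.Releasables;

    AtomicInteger closes = new AtomicInteger();
    Releasable once = Releasables.releaseOnce(closes::incrementAndGet);
    once.close();
    once.close(); // no-op: the AtomicBoolean has already flipped, so no double release
    assert closes.get() == 1;
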
b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index 5bb92235044..3a5d71d1fcd 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; -/** - * - */ public class FilteredCollector implements Collector { private final Collector collector; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java deleted file mode 100644 index 9caf350926c..00000000000 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MatchNoDocsQuery.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.ConstantScoreWeight; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.Scorer; - -import java.io.IOException; -import java.util.Set; - -/** - * A query that matches no documents and prints the reason why in the toString method. - */ -public class MatchNoDocsQuery extends Query { - /** - * The reason why the query does not match any document. 
- */ - private final String reason; - - public MatchNoDocsQuery(String reason) { - this.reason = reason; - } - - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - return new ConstantScoreWeight(this) { - @Override - public void extractTerms(Set<Term> terms) { - } - - @Override - public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return Explanation.noMatch(reason); - } - - @Override - public Scorer scorer(LeafReaderContext context) throws IOException { - return null; - } - }; - } - - @Override - public String toString(String field) { - return "MatchNoDocsQuery[\"" + reason + "\"]"; - } - - @Override - public boolean equals(Object obj) { - return sameClassAs(obj); - } - - @Override - public int hashCode() { - return classHash(); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java index 06ab2b4a530..81a8dfdd1e9 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java @@ -45,9 +45,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -/** - * - */ public class MoreLikeThisQuery extends Query { public static final String DEFAULT_MINIMUM_SHOULD_MATCH = "30%"; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 2a3fd94e914..8933b56b124 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; @@ -34,9 +35,6 @@ import org.elasticsearch.index.mapper.TypeFieldMapper; import java.util.List; import java.util.regex.Pattern; -/** - * - */ public class Queries { public static Query newMatchAllQuery() { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java index 9b33f3d90cc..189ca1584ae 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java @@ -24,9 +24,6 @@ import org.apache.lucene.index.LeafReaderContext; import java.io.IOException; import java.util.Objects;
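
The class deleted above is superseded by Lucene's own org.apache.lucene.search.MatchNoDocsQuery, which Queries now imports instead; to the best of my understanding it likewise carries a reason string that surfaces in toString() and explain():

    import org.apache.lucene.search.MatchNoDocsQuery;
    import org.apache.lucene.search.Query;

    Query none = new MatchNoDocsQuery("no mapped fields to search");
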
-/** - * - */ public class WeightFactorFunction extends ScoreFunction { private static final ScoreFunction SCORE_ONE = new ScoreOne(CombineFunction.MULTIPLY); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java b/core/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java index bb63978d70c..9866e239dba 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java @@ -24,9 +24,6 @@ import org.apache.lucene.store.IndexInput; import java.io.IOException; import java.io.InputStream; -/** - * - */ public class InputStreamIndexInput extends InputStream { private final IndexInput indexInput; diff --git a/core/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java b/core/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java index 917ec9d4960..b65d9999869 100644 --- a/core/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java +++ b/core/src/main/java/org/elasticsearch/common/metrics/CounterMetric.java @@ -21,8 +21,6 @@ package org.elasticsearch.common.metrics; import java.util.concurrent.atomic.LongAdder; -/** - */ public class CounterMetric implements Metric { private final LongAdder counter = new LongAdder(); diff --git a/core/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java b/core/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java index f6f3104f558..1ee1a867fad 100644 --- a/core/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java +++ b/core/src/main/java/org/elasticsearch/common/metrics/MeanMetric.java @@ -21,8 +21,6 @@ package org.elasticsearch.common.metrics; import java.util.concurrent.atomic.LongAdder; -/** - */ public class MeanMetric implements Metric { private final LongAdder counter = new LongAdder(); diff --git a/core/src/main/java/org/elasticsearch/common/metrics/Metric.java b/core/src/main/java/org/elasticsearch/common/metrics/Metric.java index b986c282a59..4f5bce173fc 100644 --- a/core/src/main/java/org/elasticsearch/common/metrics/Metric.java +++ b/core/src/main/java/org/elasticsearch/common/metrics/Metric.java @@ -19,7 +19,5 @@ package org.elasticsearch.common.metrics; -/** - */ public interface Metric { } diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 60bb35d8f1c..530ecefd4cf 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -45,10 +45,8 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.local.LocalTransport; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -112,7 +110,6 @@ public final class NetworkModule { NetworkService networkService) { this.settings = settings; this.transportClient = transportClient; - registerTransport(LOCAL_TRANSPORT, () -> new LocalTransport(settings, threadPool, namedWriteableRegistry, circuitBreakerService)); for (NetworkPlugin plugin : plugins) { if (transportClient == false && HTTP_ENABLED.get(settings)) { Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(settings, threadPool,
bigArrays, @@ -126,7 +123,7 @@ public final class NetworkModule { for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); } - List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors(); + List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry); for (TransportInterceptor interceptor : transportInterceptors) { registerTransportInterceptor(interceptor); } @@ -165,8 +162,8 @@ public final class NetworkModule { * @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because * it is the name under which the command's reader is registered. */ - private static <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser, - ParseField commandName) { + private static <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, + AllocationCommand.Parser<T> parser, ParseField commandName) { allocationCommandRegistry.register(parser, commandName); namedWriteables.add(new Entry(AllocationCommand.class, commandName.getPreferredName(), reader)); } @@ -237,9 +234,10 @@ public final class NetworkModule { } @Override - public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, TransportRequestHandler<T> actualHandler) { + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, String executor, + TransportRequestHandler<T> actualHandler) { for (TransportInterceptor interceptor : this.transportInterceptors) { - actualHandler = interceptor.interceptHandler(action, actualHandler); + actualHandler = interceptor.interceptHandler(action, executor, actualHandler); } return actualHandler; } diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index 2652f9ff646..b72acf8064c 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -36,9 +36,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Function; -/** - * - */ public class NetworkService extends AbstractComponent { /** By default, we bind to loopback interfaces */ diff --git a/core/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/core/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java index 258e4355b9f..d1bf59a92c6 100644 --- a/core/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java +++ b/core/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java @@ -20,8 +20,6 @@ package org.elasticsearch.common.recycler; -/** - */ public class NoneRecycler<T> extends AbstractRecycler<T> { public NoneRecycler(C<T> c) { diff --git a/core/src/main/java/org/elasticsearch/common/regex/Regex.java b/core/src/main/java/org/elasticsearch/common/regex/Regex.java index 061ad6c26c0..bcf2dfba3ef 100644 --- a/core/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/core/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -19,14 +19,16 @@ package org.elasticsearch.common.regex; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.Strings; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.regex.Pattern; -/** - * - */ public class Regex { /** @@ -46,6 +48,33 @@ public class Regex { return str.equals("*"); } + /** Return
an {@link Automaton} that matches the given pattern. */ + public static Automaton simpleMatchToAutomaton(String pattern) { + List<Automaton> automata = new ArrayList<>(); + int previous = 0; + for (int i = pattern.indexOf('*'); i != -1; i = pattern.indexOf('*', i + 1)) { + automata.add(Automata.makeString(pattern.substring(previous, i))); + automata.add(Automata.makeAnyString()); + previous = i + 1; + } + automata.add(Automata.makeString(pattern.substring(previous))); + return Operations.concatenate(automata); + } + + /** + * Return an Automaton that matches the union of the provided patterns. + */ + public static Automaton simpleMatchToAutomaton(String... patterns) { + if (patterns.length < 1) { + throw new IllegalArgumentException("There must be at least one pattern, zero given"); + } + List<Automaton> automata = new ArrayList<>(); + for (String pattern : patterns) { + automata.add(simpleMatchToAutomaton(pattern)); + } + return Operations.union(automata); + } + /** * Match a String against the given pattern, supporting the following simple * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an diff --git a/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java b/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java index 375c1a27212..3bf4f460d71 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/DateTimeUnit.java @@ -26,9 +26,6 @@ import org.joda.time.chrono.ISOChronology; import java.util.function.Function; -/** - * - */ public enum DateTimeUnit { WEEK_OF_WEEKYEAR( (byte) 1, tz -> ISOChronology.getInstance(tz).weekOfWeekyear()), diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 9a392860096..e72f274fd62 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -498,11 +498,21 @@ public abstract class AbstractScopedSettings extends AbstractComponent { } /** - * Archives broken or unknown settings. Any setting that is not recognized or fails - * validation will be archived. This means the setting is prefixed with {@value ARCHIVED_SETTINGS_PREFIX} - * and remains in the settings object. This can be used to detect broken settings via APIs. + * Archives invalid or unknown settings. Any setting that is not recognized or fails validation + * will be archived. This means the setting is prefixed with {@value ARCHIVED_SETTINGS_PREFIX} + * and remains in the settings object. This can be used to detect invalid settings via APIs.
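
The automaton variants added to Regex above build Lucene automata out of simple wildcard patterns; a usage sketch, executing the result with Lucene's CharacterRunAutomaton:

    import org.apache.lucene.util.automaton.Automaton;
    import org.apache.lucene.util.automaton.CharacterRunAutomaton;
    import org.elasticsearch.common.regex.Regex;

    Automaton automaton = Regex.simpleMatchToAutomaton("foo*bar", "baz-*");
    CharacterRunAutomaton run = new CharacterRunAutomaton(automaton);
    assert run.run("foo-and-bar");      // matches foo*bar
    assert run.run("baz-1");            // matches baz-*
    assert run.run("other") == false;
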
+ * + * @param settings the {@link Settings} instance to scan for unknown or invalid settings + * @param unknownConsumer callback on unknown settings (consumer receives unknown key and its + * associated value) + * @param invalidConsumer callback on invalid settings (consumer receives invalid key, its + * associated value and an exception) + * @return a {@link Settings} instance with the unknown or invalid settings archived */ - public Settings archiveUnknownOrBrokenSettings(Settings settings) { + public Settings archiveUnknownOrInvalidSettings( + final Settings settings, + final Consumer<Map.Entry<String, String>> unknownConsumer, + final BiConsumer<Map.Entry<String, String>, IllegalArgumentException> invalidConsumer) { Settings.Builder builder = Settings.builder(); boolean changed = false; for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) { @@ -516,10 +526,10 @@ public abstract class AbstractScopedSettings extends AbstractComponent { builder.put(entry.getKey(), entry.getValue()); } else { changed = true; - logger.warn("found unknown setting: {} value: {} - archiving", entry.getKey(), entry.getValue()); + unknownConsumer.accept(entry); /* - * We put them back in here such that tools can check from the outside if there are any indices with broken - * settings. The setting can remain there but we want users to be aware that some of their setting are broken and + * We put them back in here such that tools can check from the outside if there are any indices with invalid + * settings. The setting can remain there but we want users to be aware that some of their setting are invalid and * they can research why and what they need to do to replace them. */ builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue()); @@ -527,12 +537,10 @@ public abstract class AbstractScopedSettings extends AbstractComponent { } } catch (IllegalArgumentException ex) { changed = true; - logger.warn( - (Supplier<?>) () -> new ParameterizedMessage( - "found invalid setting: {} value: {} - archiving", entry.getKey(), entry.getValue()), ex); + invalidConsumer.accept(entry, ex); /* - * We put them back in here such that tools can check from the outside if there are any indices with broken settings. The - * setting can remain there but we want users to be aware that some of their setting are broken and they can research why + * We put them back in here such that tools can check from the outside if there are any indices with invalid settings. The + * setting can remain there but we want users to be aware that some of their setting are invalid and they can research why * and what they need to do to replace them.
*/ builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue()); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index b5a0564174a..ac4f3931b78 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -56,8 +56,8 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.fd.FaultDetection; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; +import org.elasticsearch.discovery.zen.FaultDetection; +import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayService; @@ -209,7 +209,6 @@ public final class ClusterSettings extends AbstractScopedSettings { SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, - SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, DestructiveOperations.REQUIRES_NAME_SETTING, DiscoverySettings.PUBLISH_TIMEOUT_SETTING, DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, @@ -328,7 +327,7 @@ public final class ClusterSettings extends AbstractScopedSettings { NodeEnvironment.NODE_ID_SEED_SETTING, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, - DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING, FaultDetection.PING_RETRIES_SETTING, FaultDetection.PING_TIMEOUT_SETTING, FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING, @@ -346,6 +345,7 @@ public final class ClusterSettings extends AbstractScopedSettings { UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, SearchService.DEFAULT_KEEPALIVE_SETTING, SearchService.KEEPALIVE_INTERVAL_SETTING, + SearchService.LOW_LEVEL_CANCELLATION_SETTING, Node.WRITE_PORTS_FIELD_SETTING, Node.NODE_NAME_SETTING, Node.NODE_DATA_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index b9391522565..394d84dabdc 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -189,6 +189,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { case IndexMetaData.SETTING_INDEX_UUID: case IndexMetaData.SETTING_VERSION_CREATED: case IndexMetaData.SETTING_VERSION_UPGRADED: + case IndexMetaData.SETTING_INDEX_PROVIDED_NAME: case MergePolicyConfig.INDEX_MERGE_ENABLED: return true; default: diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 9e5dd0efbe2..819edc246ac 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -582,6 +582,9 @@ public final class Settings implements ToXContent { return builder; } + public 
static final Set<String> FORMAT_PARAMS = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("settings_filter", "flat_settings"))); + /** * Returns true if this settings object contains no settings * @return true if this settings object contains no settings diff --git a/core/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java b/core/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java index 28e4506b953..336b9c536a1 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java +++ b/core/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java @@ -68,18 +68,18 @@ public class BoundTransportAddress implements Streamable { int boundAddressLength = in.readInt(); boundAddresses = new TransportAddress[boundAddressLength]; for (int i = 0; i < boundAddressLength; i++) { - boundAddresses[i] = TransportAddressSerializers.addressFromStream(in); + boundAddresses[i] = new TransportAddress(in); } - publishAddress = TransportAddressSerializers.addressFromStream(in); + publishAddress = new TransportAddress(in); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(boundAddresses.length); for (TransportAddress address : boundAddresses) { - TransportAddressSerializers.addressToStream(out, address); + address.writeTo(out); } - TransportAddressSerializers.addressToStream(out, publishAddress); + publishAddress.writeTo(out); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java b/core/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java deleted file mode 100644 index 94c1a2390ac..00000000000 --- a/core/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.transport; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.network.NetworkAddress; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; - -/** - * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}).
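
With the archiving method above, callers now decide how unknown and invalid settings are reported. A sketch of a call site (indexSettings, indexScopedSettings and logger are assumed to be in scope):

    Settings upgraded = indexScopedSettings.archiveUnknownOrInvalidSettings(
        indexSettings,
        unknown -> logger.warn("unknown setting [{}], archiving", unknown.getKey()),
        (invalid, ex) -> logger.warn("invalid setting [" + invalid.getKey() + "], archiving", ex));
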
- */ -public final class InetSocketTransportAddress implements TransportAddress { - public static final short TYPE_ID = 1; - - private final InetSocketAddress address; - - public InetSocketTransportAddress(InetAddress address, int port) { - this(new InetSocketAddress(address, port)); - } - - public InetSocketTransportAddress(InetSocketAddress address) { - if (address == null) { - throw new IllegalArgumentException("InetSocketAddress must not be null"); - } - if (address.getAddress() == null) { - throw new IllegalArgumentException("Address must be resolved but wasn't - InetSocketAddress#getAddress() returned null"); - } - this.address = address; - } - - /** - * Read from a stream. - */ - public InetSocketTransportAddress(StreamInput in) throws IOException { - final int len = in.readByte(); - final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6) - in.readFully(a); - InetAddress inetAddress = InetAddress.getByAddress(a); - int port = in.readInt(); - this.address = new InetSocketAddress(inetAddress, port); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - byte[] bytes = address().getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6) - out.writeByte((byte) bytes.length); // 1 byte - out.write(bytes, 0, bytes.length); - // don't serialize scope ids over the network!!!! - // these only make sense with respect to the local machine, and will only formulate - // the address incorrectly remotely. - out.writeInt(address.getPort()); - } - - @Override - public short uniqueAddressTypeId() { - return TYPE_ID; - } - - @Override - public boolean sameHost(TransportAddress other) { - return other instanceof InetSocketTransportAddress && - address.getAddress().equals(((InetSocketTransportAddress) other).address.getAddress()); - } - - @Override - public boolean isLoopbackOrLinkLocalAddress() { - return address.getAddress().isLinkLocalAddress() || address.getAddress().isLoopbackAddress(); - } - - @Override - public String getHost() { - return getAddress(); // just delegate no resolving - } - - @Override - public String getAddress() { - return NetworkAddress.format(address.getAddress()); - } - - @Override - public int getPort() { - return address.getPort(); - } - - public InetSocketAddress address() { - return this.address; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - InetSocketTransportAddress address1 = (InetSocketTransportAddress) o; - return address.equals(address1.address); - } - - @Override - public int hashCode() { - return address != null ? address.hashCode() : 0; - } - - @Override - public String toString() { - return NetworkAddress.format(address); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java b/core/src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java deleted file mode 100644 index 48becc832da..00000000000 --- a/core/src/main/java/org/elasticsearch/common/transport/LocalTransportAddress.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.transport; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicLong; - -/** - * - */ -public final class LocalTransportAddress implements TransportAddress { - public static final short TYPE_ID = 2; - - private static final AtomicLong transportAddressIdGenerator = new AtomicLong(); - - /** - * generates a new unique address - */ - public static LocalTransportAddress buildUnique() { - return new LocalTransportAddress(Long.toString(transportAddressIdGenerator.incrementAndGet())); - } - - private String id; - - public LocalTransportAddress(String id) { - this.id = id; - } - - /** - * Read from a stream. - */ - public LocalTransportAddress(StreamInput in) throws IOException { - id = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(id); - } - - public String id() { - return this.id; - } - - @Override - public short uniqueAddressTypeId() { - return TYPE_ID; - } - - @Override - public boolean sameHost(TransportAddress other) { - return other instanceof LocalTransportAddress && id.equals(((LocalTransportAddress) other).id); - } - - @Override - public boolean isLoopbackOrLinkLocalAddress() { - return false; - } - - @Override - public String getHost() { - return "local"; - } - - @Override - public String getAddress() { - return "0.0.0.0"; // see https://en.wikipedia.org/wiki/0.0.0.0 - } - - @Override - public int getPort() { - return 0; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - LocalTransportAddress that = (LocalTransportAddress) o; - - if (id != null ? !id.equals(that.id) : that.id != null) return false; - - return true; - } - - @Override - public int hashCode() { - return id != null ? 
id.hashCode() : 0; - } - - @Override - public String toString() { - return "local[" + id + "]"; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java b/core/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java index 5d6211c3fec..b78d61f2506 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java +++ b/core/src/main/java/org/elasticsearch/common/transport/NetworkExceptionHelper.java @@ -22,9 +22,6 @@ package org.elasticsearch.common.transport; import java.net.ConnectException; import java.nio.channels.ClosedChannelException; -/** - * - */ public class NetworkExceptionHelper { public static boolean isConnectException(Throwable e) { diff --git a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java index f88f1de8fe0..f567264d26e 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java +++ b/core/src/main/java/org/elasticsearch/common/transport/PortsRange.java @@ -24,9 +24,6 @@ import com.carrotsearch.hppc.IntArrayList; import java.util.StringTokenizer; -/** - * - */ public class PortsRange { private final String portRange; @@ -83,4 +80,11 @@ public class PortsRange { public interface PortCallback { boolean onPortNumber(int portNumber); } + + @Override + public String toString() { + return "PortsRange{" + + "portRange='" + portRange + '\'' + + '}'; + } } diff --git a/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java index 08e8af2bffe..9c55f2d2f18 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java +++ b/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java @@ -19,34 +19,111 @@ package org.elasticsearch.common.transport; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.network.NetworkAddress; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; /** - * + * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). */ -public interface TransportAddress extends Writeable { +public final class TransportAddress implements Writeable { /** - * Returns the host string for this transport address + * A non-routeable v4 meta transport address that can be used for + * testing or in scenarios where targets should be marked as non-applicable from a transport perspective. 
*/ - String getHost(); + public static final InetAddress META_ADDRESS; + + static { + try { + META_ADDRESS = InetAddress.getByName("0.0.0.0"); + } catch (UnknownHostException e) { + throw new AssertionError(e); + } + } + + private final InetSocketAddress address; + + public TransportAddress(InetAddress address, int port) { + this(new InetSocketAddress(address, port)); + } + + public TransportAddress(InetSocketAddress address) { + if (address == null) { + throw new IllegalArgumentException("InetSocketAddress must not be null"); + } + if (address.getAddress() == null) { + throw new IllegalArgumentException("Address must be resolved but wasn't - InetSocketAddress#getAddress() returned null"); + } + this.address = address; + } /** - * Returns the address string for this transport address + * Read from a stream. */ - String getAddress(); + public TransportAddress(StreamInput in) throws IOException { + final int len = in.readByte(); + final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6) + in.readFully(a); + InetAddress inetAddress = InetAddress.getByAddress(a); + int port = in.readInt(); + this.address = new InetSocketAddress(inetAddress, port); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6) + out.writeByte((byte) bytes.length); // 1 byte + out.write(bytes, 0, bytes.length); + // don't serialize scope ids over the network!!!! + // these only make sense with respect to the local machine, and will only formulate + // the address incorrectly remotely. + out.writeInt(address.getPort()); + } /** - * Returns the port of this transport address if applicable + * Returns a string representation of the enclosed {@link InetSocketAddress} + * @see NetworkAddress#format(InetAddress) */ - int getPort(); + public String getAddress() { + return NetworkAddress.format(address.getAddress()); + } - short uniqueAddressTypeId(); + /** + * Returns the addresses port + */ + public int getPort() { + return address.getPort(); + } - boolean sameHost(TransportAddress other); + /** + * Returns the enclosed {@link InetSocketAddress} + */ + public InetSocketAddress address() { + return this.address; + } - boolean isLoopbackOrLinkLocalAddress(); + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TransportAddress address1 = (TransportAddress) o; + return address.equals(address1.address); + } - String toString(); + @Override + public int hashCode() { + return address != null ? address.hashCode() : 0; + } + + @Override + public String toString() { + return NetworkAddress.format(address); + } } diff --git a/core/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java b/core/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java deleted file mode 100644 index 784bee52d63..00000000000 --- a/core/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
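
With the TransportAddress interface collapsed into the single concrete class above, addresses are constructed and serialized directly, with no type id on the wire any more. A round-trip sketch (assuming BytesReference#streamInput from this codebase):

    import java.io.IOException;
    import java.net.InetAddress;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.transport.TransportAddress;

    static void roundTrip() throws IOException {
        TransportAddress original = new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);                      // address bytes + port
        StreamInput in = out.bytes().streamInput();
        TransportAddress copy = new TransportAddress(in);
        assert original.equals(copy);
    }
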
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.transport; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * A global registry of all supported types of {@link TransportAddress}s. This registry is not open for modification by plugins. - */ -public abstract class TransportAddressSerializers { - private static final Map<Short, Writeable.Reader<TransportAddress>> ADDRESS_REGISTRY; - - static { - Map<Short, Writeable.Reader<TransportAddress>> registry = new HashMap<>(); - addAddressType(registry, InetSocketTransportAddress.TYPE_ID, InetSocketTransportAddress::new); - addAddressType(registry, LocalTransportAddress.TYPE_ID, LocalTransportAddress::new); - ADDRESS_REGISTRY = unmodifiableMap(registry); - } - - private static void addAddressType(Map<Short, Writeable.Reader<TransportAddress>> registry, short uniqueAddressTypeId, - Writeable.Reader<TransportAddress> address) { - if (registry.containsKey(uniqueAddressTypeId)) { - throw new IllegalStateException("Address [" + uniqueAddressTypeId + "] already bound"); - } - registry.put(uniqueAddressTypeId, address); - } - - public static TransportAddress addressFromStream(StreamInput input) throws IOException { - // TODO why don't we just use named writeables here? - short addressUniqueId = input.readShort(); - Writeable.Reader<TransportAddress> addressType = ADDRESS_REGISTRY.get(addressUniqueId); - if (addressType == null) { - throw new IOException("No transport address mapped to [" + addressUniqueId + "]"); - } - return addressType.read(input); - } - - public static void addressToStream(StreamOutput out, TransportAddress address) throws IOException { - out.writeShort(address.uniqueAddressTypeId()); - address.writeTo(out); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java index 7a412aac090..e7e43b6d78a 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java +++ b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java @@ -269,4 +269,4 @@ public enum ByteSizeUnit implements Writeable { public static ByteSizeUnit readFrom(StreamInput in) throws IOException { return ByteSizeUnit.fromId(in.readVInt()); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 7d2be6fee3e..e0782e32cae 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -29,7 +29,7 @@ import java.io.IOException; import java.util.Locale; import java.util.Objects; -public class ByteSizeValue implements Writeable { +public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue> { private final long size; private final ByteSizeUnit unit; @@ -191,15 +191,18 @@ public class ByteSizeValue { return false; } - ByteSizeValue sizeValue = (ByteSizeValue) o; - - return getBytes() ==
sizeValue.getBytes(); + return compareTo((ByteSizeValue) o) == 0; } @Override public int hashCode() { - int result = Long.hashCode(size); - result = 31 * result + (unit != null ? unit.hashCode() : 0); - return result; + return Double.hashCode(((double) size) * unit.toBytes(1)); + } + + @Override + public int compareTo(ByteSizeValue other) { + double thisValue = ((double) size) * unit.toBytes(1); + double otherValue = ((double) other.size) * other.unit.toBytes(1); + return Double.compare(thisValue, otherValue); } } diff --git a/core/src/main/java/org/elasticsearch/common/unit/SizeUnit.java b/core/src/main/java/org/elasticsearch/common/unit/SizeUnit.java index 984931d2647..bfa8da1791d 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/SizeUnit.java +++ b/core/src/main/java/org/elasticsearch/common/unit/SizeUnit.java @@ -19,9 +19,6 @@ package org.elasticsearch.common.unit; -/** - * - */ public enum SizeUnit { SINGLE { @Override diff --git a/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java b/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java index cba51f29eeb..0f90582007b 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/SizeValue.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -public class SizeValue implements Writeable { +public class SizeValue implements Writeable, Comparable<SizeValue> { private final long size; private final SizeUnit sizeUnit; @@ -201,18 +201,18 @@ public class SizeValue implements Writeable { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - SizeValue sizeValue = (SizeValue) o; - - if (size != sizeValue.size) return false; - if (sizeUnit != sizeValue.sizeUnit) return false; - - return true; + return compareTo((SizeValue) o) == 0; } @Override public int hashCode() { - int result = Long.hashCode(size); - result = 31 * result + (sizeUnit != null ?
sizeUnit.hashCode() : 0); - return result; + return Double.hashCode(((double) size) * sizeUnit.toSingles(1)); + } + + @Override + public int compareTo(SizeValue other) { + double thisValue = ((double) size) * sizeUnit.toSingles(1); + double otherValue = ((double) other.size) * other.sizeUnit.toSingles(1); + return Double.compare(thisValue, otherValue); } } diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 8f81efb6498..4ab91aac5b5 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -39,7 +39,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; -public class TimeValue implements Writeable { +public class TimeValue implements Writeable, Comparable { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); @@ -381,17 +381,22 @@ public class TimeValue implements Writeable { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - TimeValue timeValue = (TimeValue) o; - return timeUnit.toNanos(duration) == timeValue.timeUnit.toNanos(timeValue.duration); + return this.compareTo(((TimeValue) o)) == 0; } @Override public int hashCode() { - long normalized = timeUnit.toNanos(duration); - return Long.hashCode(normalized); + return Double.hashCode(((double) duration) * timeUnit.toNanos(1)); } public static long nsecToMSec(long ns) { return ns / NSEC_PER_MSEC; } + + @Override + public int compareTo(TimeValue timeValue) { + double thisValue = ((double) duration) * timeUnit.toNanos(1); + double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1); + return Double.compare(thisValue, otherValue); + } } diff --git a/core/src/main/java/org/elasticsearch/common/util/AbstractArray.java b/core/src/main/java/org/elasticsearch/common/util/AbstractArray.java index 1187dfef7e5..913f1ad26a4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/AbstractArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/AbstractArray.java @@ -23,13 +23,14 @@ import org.apache.lucene.util.Accountable; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; abstract class AbstractArray implements BigArray { private final BigArrays bigArrays; public final boolean clearOnResize; - private boolean released = false; + private final AtomicBoolean closed = new AtomicBoolean(false); AbstractArray(BigArrays bigArrays, boolean clearOnResize) { this.bigArrays = bigArrays; @@ -38,10 +39,13 @@ abstract class AbstractArray implements BigArray { @Override public final void close() { - bigArrays.adjustBreaker(-ramBytesUsed()); - assert !released : "double release"; - released = true; - doClose(); + if (closed.compareAndSet(false, true)) { + try { + bigArrays.adjustBreaker(-ramBytesUsed()); + } finally { + doClose(); + } + } } protected abstract void doClose(); diff --git a/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java b/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java index bb8442efa08..de23663b3e9 100644 --- a/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java @@ -22,9 +22,6 @@ package org.elasticsearch.common.util; import java.lang.reflect.Array; import java.util.Arrays; -/** - * - */ 
public class ArrayUtils { private ArrayUtils() {} diff --git a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java index 6a15a3d9000..728db17c2a4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -397,7 +397,7 @@ public class BigArrays implements Releasable { void adjustBreaker(long delta) { if (this.breakerService != null) { CircuitBreaker breaker = this.breakerService.getBreaker(CircuitBreaker.REQUEST); - if (this.checkBreaker == true) { + if (this.checkBreaker) { // checking breaker means potentially tripping, but it doesn't // have to if the delta is negative if (delta > 0) { diff --git a/core/src/main/java/org/elasticsearch/common/util/Callback.java b/core/src/main/java/org/elasticsearch/common/util/Callback.java index d4e3c94f700..3e498ef3747 100644 --- a/core/src/main/java/org/elasticsearch/common/util/Callback.java +++ b/core/src/main/java/org/elasticsearch/common/util/Callback.java @@ -19,9 +19,6 @@ package org.elasticsearch.common.util; -/** - * - */ public interface Callback { void handle(T t); diff --git a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java b/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java deleted file mode 100644 index fcdfaafb1d5..00000000000 --- a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.util; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.multibindings.MapBinder; -import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.common.settings.Settings; - -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -/** - * This class defines an official elasticsearch extension point. It registers - * all extensions by a single name and ensures that extensions are not registered - * more than once. - */ -public abstract class ExtensionPoint { - protected final String name; - protected final Class[] singletons; - - /** - * Creates a new extension point - * - * @param name the human readable underscore case name of the extension point. This is used in error messages etc. - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - */ - public ExtensionPoint(String name, Class... singletons) { - this.name = name; - this.singletons = singletons; - } - - /** - * Binds the extension as well as the singletons to the given guice binder. 
- * - * @param binder the binder to use - */ - public final void bind(Binder binder) { - for (Class c : singletons) { - binder.bind(c).asEagerSingleton(); - } - bindExtensions(binder); - } - - /** - * Subclasses can bind their type, map or set extensions here. - */ - protected abstract void bindExtensions(Binder binder); - - /** - * A map based extension point which allows to register keyed implementations ie. parsers or some kind of strategies. - */ - public static class ClassMap extends ExtensionPoint { - protected final Class extensionClass; - protected final Map> extensions = new HashMap<>(); - private final Set reservedKeys; - - /** - * Creates a new {@link ClassMap} - * - * @param name the human readable underscore case name of the extension point. This is used in error messages etc. - * @param extensionClass the base class that should be extended - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - * @param reservedKeys a set of reserved keys by internal implementations - */ - public ClassMap(String name, Class extensionClass, Set reservedKeys, Class... singletons) { - super(name, singletons); - this.extensionClass = extensionClass; - this.reservedKeys = reservedKeys; - } - - /** - * Returns the extension for the given key or null - */ - public Class getExtension(String type) { - return extensions.get(type); - } - - /** - * Registers an extension class for a given key. This method will thr - * - * @param key the extensions key - * @param extension the extension - * @throws IllegalArgumentException iff the key is already registered or if the key is a reserved key for an internal implementation - */ - public final void registerExtension(String key, Class extension) { - if (extensions.containsKey(key) || reservedKeys.contains(key)) { - throw new IllegalArgumentException("Can't register the same [" + this.name + "] more than once for [" + key + "]"); - } - extensions.put(key, extension); - } - - @Override - protected final void bindExtensions(Binder binder) { - MapBinder parserMapBinder = MapBinder.newMapBinder(binder, String.class, extensionClass); - for (Map.Entry> clazz : extensions.entrySet()) { - parserMapBinder.addBinding(clazz.getKey()).to(clazz.getValue()); - } - } - } - - /** - * A Type extension point which basically allows to registered keyed extensions like {@link ClassMap} - * but doesn't instantiate and bind all the registered key value pairs but instead replace a singleton based on a given setting via {@link #bindType(Binder, Settings, String, String)} - * Note: {@link #bind(Binder)} is not supported by this class - */ - public static final class SelectedType extends ClassMap { - - public SelectedType(String name, Class extensionClass) { - super(name, extensionClass, Collections.emptySet()); - } - - /** - * Binds the extension class to the class that is registered for the give configured for the settings key in - * the settings object. 
- * - * @param binder the binder to use - * @param settings the settings to look up the key to find the implementation to bind - * @param settingsKey the key to use with the settings - * @param defaultValue the default value if the settings do not contain the key, or null if there is no default - * @return the actual bound type key - */ - public String bindType(Binder binder, Settings settings, String settingsKey, String defaultValue) { - final String type = settings.get(settingsKey, defaultValue); - if (type == null) { - throw new IllegalArgumentException("Missing setting [" + settingsKey + "]"); - } - final Class instance = getExtension(type); - if (instance == null) { - throw new IllegalArgumentException("Unknown [" + this.name + "] type [" + type + "] possible values: " - + extensions.keySet()); - } - if (extensionClass == instance) { - binder.bind(extensionClass).asEagerSingleton(); - } else { - binder.bind(extensionClass).to(instance).asEagerSingleton(); - } - return type; - } - - } - - /** - * A set based extension point which allows to register extended classes that might be used to chain additional functionality etc. - */ - public static final class ClassSet extends ExtensionPoint { - protected final Class extensionClass; - private final Set> extensions = new HashSet<>(); - - /** - * Creates a new {@link ClassSet} - * - * @param name the human readable underscore case name of the extension point. This is used in error messages etc. - * @param extensionClass the base class that should be extended - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - */ - public ClassSet(String name, Class extensionClass, Class... singletons) { - super(name, singletons); - this.extensionClass = extensionClass; - } - - /** - * Registers a new extension - * - * @param extension the extension to register - * @throws IllegalArgumentException iff the class is already registered - */ - public void registerExtension(Class extension) { - if (extensions.contains(extension)) { - throw new IllegalArgumentException("Can't register the same [" + this.name + "] more than once for [" + extension.getName() + "]"); - } - extensions.add(extension); - } - - @Override - protected void bindExtensions(Binder binder) { - Multibinder allocationMultibinder = Multibinder.newSetBinder(binder, extensionClass); - for (Class clazz : extensions) { - binder.bind(clazz).asEagerSingleton(); - allocationMultibinder.addBinding().to(clazz); - } - } - } - - /** - * A an instance of a map, mapping one instance value to another. Both key and value are instances, not classes - * like with other extension points. - */ - public static final class InstanceMap extends ExtensionPoint { - private final Map map = new HashMap<>(); - private final Class keyType; - private final Class valueType; - - /** - * Creates a new {@link ClassSet} - * - * @param name the human readable underscore case name of the extension point. This is used in error messages. - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - */ - public InstanceMap(String name, Class keyType, Class valueType, Class... 
singletons) { - super(name, singletons); - this.keyType = keyType; - this.valueType = valueType; - } - - /** - * Registers a mapping from {@code key} to {@code value} - * - * @throws IllegalArgumentException iff the key is already registered - */ - public void registerExtension(K key, V value) { - V old = map.put(key, value); - if (old != null) { - throw new IllegalArgumentException("Cannot register [" + this.name + "] with key [" + key + "] to [" + value + "], already registered to [" + old + "]"); - } - } - - @Override - protected void bindExtensions(Binder binder) { - MapBinder mapBinder = MapBinder.newMapBinder(binder, keyType, valueType); - for (Map.Entry entry : map.entrySet()) { - mapBinder.addBinding(entry.getKey()).toInstance(entry.getValue()); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 947aad48737..f40938b8ec0 100644 --- a/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/core/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -69,7 +69,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable { super(settings); final Type type = TYPE_SETTING .get(settings); final long limit = LIMIT_HEAP_SETTING .get(settings).getBytes(); - final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + final int availableProcessors = EsExecutors.numberOfProcessors(settings); // We have a global amount of memory that we need to divide across data types. // Since some types are more useful than other ones we give them different weights. diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java index 1c78b92bebb..1f6b0645d4e 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java @@ -31,9 +31,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.LinkedTransferQueue; -/** - * - */ public abstract class ConcurrentCollections { static final int aggressiveConcurrencyLevel; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java index 4a1d67aa38b..32e3a00c074 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java @@ -24,9 +24,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; -/** - * - */ public class ConcurrentHashMapLong implements ConcurrentMapLong { private final ConcurrentMap map; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java index 82212ad6165..85482ad6703 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentMapLong.java @@ -21,9 +21,6 @@ package org.elasticsearch.common.util.concurrent; import java.util.concurrent.ConcurrentMap; -/** - * - */ public interface ConcurrentMapLong extends ConcurrentMap { T get(long key); diff --git 
a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index 2b19fa2096c..986b925f9e6 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -24,8 +24,6 @@ import org.elasticsearch.common.metrics.CounterMetric; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ThreadPoolExecutor; -/** - */ public class EsAbortPolicy implements XRejectedExecutionHandler { private final CounterMetric rejected = new CounterMetric(); diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 2d682648ca4..fbb9f65414a 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -40,16 +40,17 @@ public class EsExecutors { * This is used to adjust thread pools sizes etc. per node. */ public static final Setting PROCESSORS_SETTING = - Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope); + Setting.intSetting("processors", Runtime.getRuntime().availableProcessors(), 1, Property.NodeScope); /** - * Returns the number of processors available but at most 32. + * Returns the number of available processors. Defaults to + * {@link Runtime#availableProcessors()} but can be overridden by passing a {@link Settings} + * instance with the key "processors" set to the desired value. + * + * @param settings a {@link Settings} instance from which to derive the available processors + * @return the number of available processors */ - public static int boundedNumberOfProcessors(Settings settings) { - /* This relates to issues where machines with large number of cores - * ie. 
>= 48 create too many threads and run into OOM see #3478 - * We just use an 32 core upper-bound here to not stress the system - * too much with too many created threads */ + public static int numberOfProcessors(final Settings settings) { return PROCESSORS_SETTING.get(settings); } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java index 8033750d1d2..01fbbac725b 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java @@ -26,8 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - */ public class EsRejectedExecutionException extends ElasticsearchException { private final boolean isExecutorShutdown; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index 2f664679bb4..81c102f8fb2 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -109,6 +109,13 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { } } + @Override + protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); + assert contextHolder.isDefaultContext() : "the thread context is not the default context and the thread [" + + Thread.currentThread().getName() + "] is being returned to the pool after executing [" + r + "]"; + } + /** * Returns a stream of all pending tasks. This is similar to {@link #getQueue()} but will expose the originally submitted * {@link Runnable} instances rather than potentially wrapped ones. 
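For context, a minimal sketch of what the EsExecutors change above means for callers (illustrative, not part of the patch; class name is hypothetical): the "processors" setting now defaults to all available cores instead of being capped at 32, and can still be pinned explicitly.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

class ProcessorsSettingSketch {
    public static void main(String[] args) {
        // pin thread-pool sizing to 4 processors, e.g. on an oversubscribed host
        Settings pinned = Settings.builder().put("processors", 4).build();
        int four = EsExecutors.numberOfProcessors(pinned);

        // default: Runtime.getRuntime().availableProcessors(), no 32-core cap
        int all = EsExecutors.numberOfProcessors(Settings.EMPTY);
        System.out.println(four + " vs " + all);
    }
}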
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java index ef39156d5bf..de841634ede 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/FutureUtils.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.SuppressForbidden; import java.util.concurrent.Future; -/** - */ public class FutureUtils { @SuppressForbidden(reason = "Future#cancel()") diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java index d028b5c7f6a..c946c47d5c0 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java @@ -22,9 +22,6 @@ import org.elasticsearch.common.Priority; import java.util.concurrent.Callable; -/** - * - */ public abstract class PrioritizedCallable implements Callable, Comparable { private final Priority priority; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java index f55c84e943a..813265f19c4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java @@ -107,6 +107,7 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { @Override protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); current.remove(r); } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java index 6c3e24d1ab9..7ef2e96e2c5 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.Priority; import java.util.concurrent.TimeUnit; import java.util.function.LongSupplier; -/** - * - */ public abstract class PrioritizedRunnable implements Runnable, Comparable { private final Priority priority; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 8c04c24ec5b..18ea7e9ace7 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -246,6 +246,13 @@ public final class ThreadContext implements Closeable, Writeable { return command; } + /** + * Returns true if the current context is the default context. 
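+     * This is what {@code EsThreadPoolExecutor#afterExecute} asserts when a thread is
+     * returned to the pool, to catch callers that leak a modified thread context.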
+ */ + boolean isDefaultContext() { + return threadLocal.get() == DEFAULT_CONTEXT; + } + @FunctionalInterface public interface StoredContext extends AutoCloseable { @Override @@ -468,10 +475,12 @@ public final class ThreadContext implements Closeable, Writeable { */ private class ContextPreservingAbstractRunnable extends AbstractRunnable { private final AbstractRunnable in; - private final ThreadContext.StoredContext ctx; + private final ThreadContext.StoredContext creatorsContext; + + private ThreadContext.StoredContext threadsOriginalContext = null; private ContextPreservingAbstractRunnable(AbstractRunnable in) { - ctx = newStoredContext(); + creatorsContext = newStoredContext(); this.in = in; } @@ -482,7 +491,13 @@ public final class ThreadContext implements Closeable, Writeable { @Override public void onAfter() { - in.onAfter(); + try { + in.onAfter(); + } finally { + if (threadsOriginalContext != null) { + threadsOriginalContext.restore(); + } + } } @Override @@ -498,8 +513,9 @@ public final class ThreadContext implements Closeable, Writeable { @Override protected void doRun() throws Exception { boolean whileRunning = false; - try (ThreadContext.StoredContext ignore = stashContext()){ - ctx.restore(); + threadsOriginalContext = stashContext(); + try { + creatorsContext.restore(); whileRunning = true; in.doRun(); whileRunning = false; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java index a74a1a073f8..a741de3a8ef 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class UncategorizedExecutionException extends ElasticsearchException { public UncategorizedExecutionException(String msg, Throwable cause) { diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java index e58f2abfd5f..d201484cabc 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/XRejectedExecutionHandler.java @@ -21,8 +21,6 @@ package org.elasticsearch.common.util.concurrent; import java.util.concurrent.RejectedExecutionHandler; -/** - */ public interface XRejectedExecutionHandler extends RejectedExecutionHandler { /** diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java index b8a42cd1e13..1d30b79e295 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java @@ -297,7 +297,7 @@ public final class ConstructingObjectParser value : values.entrySet()) { field(value.getKey()); @@ -881,6 +893,10 @@ public final class XContentBuilder implements BytesStream, Releasable, Flushable //treat as single value value((Path) values); } else { + // checks that the iterable does not contain references to itself because + // iterating over entries will cause a stackoverflow error + 
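// (illustrative, not in the original patch) e.g. serializing a list that was
+            // added to itself would otherwise recurse forever here; the check below
+            // makes it fail fast with an IllegalArgumentException instead
+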
ensureNoSelfReferences(values);
+
             startArray();
             for (Object value : values) {
                 unknownValue(value);
@@ -1012,4 +1028,32 @@ public final class XContentBuilder implements BytesStream, Releasable, Flushable
             throw new IllegalArgumentException(message);
         }
     }
+
+    static void ensureNoSelfReferences(Object value) {
+        ensureNoSelfReferences(value, Collections.newSetFromMap(new IdentityHashMap<>()));
+    }
+
+    private static void ensureNoSelfReferences(final Object value, final Set<Object> ancestors) {
+        if (value != null) {
+
+            Iterable<?> it;
+            if (value instanceof Map) {
+                it = ((Map<?, ?>) value).values();
+            } else if ((value instanceof Iterable) && (value instanceof Path == false)) {
+                it = (Iterable<?>) value;
+            } else if (value instanceof Object[]) {
+                it = Arrays.asList((Object[]) value);
+            } else {
+                return;
+            }
+
+            if (ancestors.add(value) == false) {
+                throw new IllegalArgumentException("Object has already been built and is self-referencing itself");
+            }
+            for (Object o : it) {
+                ensureNoSelfReferences(o, ancestors);
+            }
+            ancestors.remove(value);
+        }
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
index 8d1b8efef51..478f3a8a08f 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java
@@ -94,4 +94,9 @@ public interface XContentGenerator extends Closeable, Flushable {
 
     void copyCurrentStructure(XContentParser parser) throws IOException;
 
+    /**
+     * Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output.
+     */
+    boolean isClosed();
+
 }
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
index 2832527a583..1625289e528 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
@@ -38,9 +38,6 @@ import java.util.Objects;
 
 import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
 
-/**
- *
- */
 @SuppressWarnings("unchecked")
 public class XContentHelper {
 
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java
index f8513828636..e5ab2a9f4c7 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java
@@ -131,6 +131,10 @@
 
     Map<String, Object> mapOrdered() throws IOException;
 
+    Map<String, String> mapStrings() throws IOException;
+
+    Map<String, String> mapStringsOrdered() throws IOException;
+
     List<Object> list() throws IOException;
 
     List<Object> listOrderedMap() throws IOException;
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java
index 296f9d2aedd..ddd736e0d00 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentType.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent;
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.cbor.CborXContent;
 import
org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.smile.SmileXContent; @@ -32,7 +33,7 @@ import java.util.Locale; /** * The content type of {@link org.elasticsearch.common.xcontent.XContent}. */ -public enum XContentType { +public enum XContentType implements Writeable { /** * A JSON based content type. @@ -168,7 +169,8 @@ public enum XContentType { throw new IllegalStateException("Unknown XContentType with index [" + index + "]"); } - public static void writeTo(XContentType contentType, StreamOutput out) throws IOException { - out.writeVInt(contentType.index); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(index); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java index ed10ea47c0e..772a5322cc7 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java @@ -23,9 +23,6 @@ import com.fasterxml.jackson.core.JsonParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentParser; -/** - * - */ public class CborXContentParser extends JsonXContentParser { public CborXContentParser(JsonParser parser) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 74e1cb5e58f..763fac4c6a3 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -419,4 +419,8 @@ public class JsonXContentGenerator implements XContentGenerator { generator.close(); } + @Override + public boolean isClosed() { + return generator.isClosed(); + } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index 5728e6035e6..f7ed46a6496 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -31,9 +31,6 @@ import org.elasticsearch.common.xcontent.support.AbstractXContentParser; import java.io.IOException; import java.nio.CharBuffer; -/** - * - */ public class JsonXContentParser extends AbstractXContentParser { final JsonParser parser; diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java index 2bbf99db27d..ad8e12e70bf 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java @@ -23,9 +23,6 @@ import com.fasterxml.jackson.core.JsonParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentParser; -/** - * - */ public class SmileXContentParser extends JsonXContentParser { public SmileXContentParser(JsonParser parser) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java 
index 9f313a59b90..d13dcbd9c93 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
@@ -31,9 +31,6 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-/**
- *
- */
 public abstract class AbstractXContentParser implements XContentParser {
 
     // Currently this is not a setting that can be changed and is a policy
@@ -218,6 +215,16 @@ public abstract class AbstractXContentParser implements XContentParser {
         return readOrderedMap(this);
     }
 
+    @Override
+    public Map<String, String> mapStrings() throws IOException {
+        return readMapStrings(this);
+    }
+
+    @Override
+    public Map<String, String> mapStringsOrdered() throws IOException {
+        return readOrderedMapStrings(this);
+    }
+
     @Override
     public List<Object> list() throws IOException {
         return readList(this);
@@ -232,10 +239,18 @@ public abstract class AbstractXContentParser implements XContentParser {
         Map<String, Object> newMap();
     }
 
+    interface MapStringsFactory {
+        Map<String, String> newMap();
+    }
+
     static final MapFactory SIMPLE_MAP_FACTORY = HashMap::new;
 
     static final MapFactory ORDERED_MAP_FACTORY = LinkedHashMap::new;
 
+    static final MapStringsFactory SIMPLE_MAP_STRINGS_FACTORY = HashMap::new;
+
+    static final MapStringsFactory ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new;
+
     static Map<String, Object> readMap(XContentParser parser) throws IOException {
         return readMap(parser, SIMPLE_MAP_FACTORY);
     }
@@ -244,6 +259,14 @@ public abstract class AbstractXContentParser implements XContentParser {
         return readMap(parser, ORDERED_MAP_FACTORY);
     }
 
+    static Map<String, String> readMapStrings(XContentParser parser) throws IOException {
+        return readMapStrings(parser, SIMPLE_MAP_STRINGS_FACTORY);
+    }
+
+    static Map<String, String> readOrderedMapStrings(XContentParser parser) throws IOException {
+        return readMapStrings(parser, ORDERED_MAP_STRINGS_FACTORY);
+    }
+
     static List<Object> readList(XContentParser parser) throws IOException {
         return readList(parser, SIMPLE_MAP_FACTORY);
     }
@@ -272,6 +295,26 @@ public abstract class AbstractXContentParser implements XContentParser {
         return map;
     }
 
+    static Map<String, String> readMapStrings(XContentParser parser, MapStringsFactory mapStringsFactory) throws IOException {
+        Map<String, String> map = mapStringsFactory.newMap();
+        XContentParser.Token token = parser.currentToken();
+        if (token == null) {
+            token = parser.nextToken();
+        }
+        if (token == XContentParser.Token.START_OBJECT) {
+            token = parser.nextToken();
+        }
+        for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) {
+            // Must point to field name
+            String fieldName = parser.currentName();
+            // And then the value...
+            parser.nextToken();
+            String value = parser.text();
+            map.put(fieldName, value);
+        }
+        return map;
+    }
+
     static List<Object> readList(XContentParser parser, MapFactory mapFactory) throws IOException {
         XContentParser.Token token = parser.currentToken();
         if (token == null) {
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java
index a8c120f424b..a94bf63e270 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java
@@ -19,19 +19,22 @@
 
 package org.elasticsearch.common.xcontent.support;
 
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.CharacterRunAutomaton;
+import org.apache.lucene.util.automaton.Operations;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.unit.TimeValue;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Function;
 
-/**
- *
- */
 public class XContentMapValues {
 
     /**
@@ -134,115 +137,179 @@ public class XContentMapValues {
         return null;
     }
 
-    public static Map<String, Object> filter(Map<String, Object> map, String[] includes, String[] excludes) {
-        Map<String, Object> result = new HashMap<>();
-        filter(map, result, includes == null ? Strings.EMPTY_ARRAY : includes, excludes == null ? Strings.EMPTY_ARRAY : excludes, new StringBuilder());
-        return result;
+    /**
+     * Only keep properties in {@code map} that match the {@code includes} but
+     * not the {@code excludes}. An empty list of includes is interpreted as a
+     * wildcard while an empty list of excludes does not match anything.
+     *
+     * If a property matches both an include and an exclude, then the exclude
+     * wins.
+     *
+     * If an object matches, then any of its sub properties are automatically
+     * considered as matching as well, both for includes and excludes.
+     *
+     * Dots in field names are treated as sub objects. So for instance if a
+     * document contains {@code a.b} as a property and {@code a} is an include,
+     * then {@code a.b} will be kept in the filtered map.
+     */
+    public static Map<String, Object> filter(Map<String, Object> map, String[] includes, String[] excludes) {
+        return filter(includes, excludes).apply(map);
     }
 
-    private static void filter(Map<String, Object> map, Map<String, Object> into, String[] includes, String[] excludes, StringBuilder sb) {
-        if (includes.length == 0 && excludes.length == 0) {
-            into.putAll(map);
-            return;
+    /**
+     * Returns a function that filters a document map based on the given include and exclude rules.
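+     * For instance (illustrative), {@code filter(new String[] {"obj.*"}, new String[] {"obj.secret"})}
+     * yields a function that keeps every property under {@code obj} except {@code obj.secret}.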
+ * @see #filter(Map, String[], String[]) for details + */ + public static Function, Map> filter(String[] includes, String[] excludes) { + CharacterRunAutomaton matchAllAutomaton = new CharacterRunAutomaton(Automata.makeAnyString()); + + CharacterRunAutomaton include; + if (includes == null || includes.length == 0) { + include = matchAllAutomaton; + } else { + Automaton includeA = Regex.simpleMatchToAutomaton(includes); + includeA = makeMatchDotsInFieldNames(includeA); + include = new CharacterRunAutomaton(includeA); } - for (Map.Entry entry : map.entrySet()) { + + Automaton excludeA; + if (excludes == null || excludes.length == 0) { + excludeA = Automata.makeEmpty(); + } else { + excludeA = Regex.simpleMatchToAutomaton(excludes); + excludeA = makeMatchDotsInFieldNames(excludeA); + } + CharacterRunAutomaton exclude = new CharacterRunAutomaton(excludeA); + + // NOTE: We cannot use Operations.minus because of the special case that + // we want all sub properties to match as soon as an object matches + + return (map) -> filter(map, + include, 0, + exclude, 0, + matchAllAutomaton); + } + + /** Make matches on objects also match dots in field names. + * For instance, if the original simple regex is `foo`, this will translate + * it into `foo` OR `foo.*`. */ + private static Automaton makeMatchDotsInFieldNames(Automaton automaton) { + return Operations.union( + automaton, + Operations.concatenate(Arrays.asList(automaton, Automata.makeChar('.'), Automata.makeAnyString()))); + } + + private static int step(CharacterRunAutomaton automaton, String key, int state) { + for (int i = 0; state != -1 && i < key.length(); ++i) { + state = automaton.step(state, key.charAt(i)); + } + return state; + } + + private static Map filter(Map map, + CharacterRunAutomaton includeAutomaton, int initialIncludeState, + CharacterRunAutomaton excludeAutomaton, int initialExcludeState, + CharacterRunAutomaton matchAllAutomaton) { + Map filtered = new HashMap<>(); + for (Map.Entry entry : map.entrySet()) { String key = entry.getKey(); - int mark = sb.length(); - if (sb.length() > 0) { - sb.append('.'); - } - sb.append(key); - String path = sb.toString(); - if (Regex.simpleMatch(excludes, path)) { - sb.setLength(mark); + int includeState = step(includeAutomaton, key, initialIncludeState); + if (includeState == -1) { continue; } - boolean exactIncludeMatch = false; // true if the current position was specifically mentioned - boolean pathIsPrefixOfAnInclude = false; // true if potentially a sub scope can be included - if (includes.length == 0) { - // implied match anything - exactIncludeMatch = true; + int excludeState = step(excludeAutomaton, key, initialExcludeState); + if (excludeState != -1 && excludeAutomaton.isAccept(excludeState)) { + continue; + } + + Object value = entry.getValue(); + + CharacterRunAutomaton subIncludeAutomaton = includeAutomaton; + int subIncludeState = includeState; + if (includeAutomaton.isAccept(includeState)) { + if (excludeState == -1 || excludeAutomaton.step(excludeState, '.') == -1) { + // the exclude has no chances to match inner properties + filtered.put(key, value); + continue; + } else { + // the object matched, so consider that the include matches every inner property + // we only care about excludes now + subIncludeAutomaton = matchAllAutomaton; + subIncludeState = 0; + } + } + + if (value instanceof Map) { + + subIncludeState = subIncludeAutomaton.step(subIncludeState, '.'); + if (subIncludeState == -1) { + continue; + } + if (excludeState != -1) { + excludeState = 
excludeAutomaton.step(excludeState, '.'); + } + + Map valueAsMap = (Map) value; + Map filteredValue = filter(valueAsMap, + subIncludeAutomaton, subIncludeState, excludeAutomaton, excludeState, matchAllAutomaton); + if (includeAutomaton.isAccept(includeState) || filteredValue.isEmpty() == false) { + filtered.put(key, filteredValue); + } + + } else if (value instanceof Iterable) { + + List filteredValue = filter((Iterable) value, + subIncludeAutomaton, subIncludeState, excludeAutomaton, excludeState, matchAllAutomaton); + if (includeAutomaton.isAccept(includeState) || filteredValue.isEmpty() == false) { + filtered.put(key, filteredValue); + } + } else { - for (String include : includes) { - // check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field - // note, this does not work well with middle matches, like obj1.*.obj3 - if (include.charAt(0) == '*') { - if (Regex.simpleMatch(include, path)) { - exactIncludeMatch = true; - break; - } - pathIsPrefixOfAnInclude = true; - continue; - } - if (include.startsWith(path)) { - if (include.length() == path.length()) { - exactIncludeMatch = true; - break; - } else if (include.length() > path.length() && include.charAt(path.length()) == '.') { - // include might may match deeper paths. Dive deeper. - pathIsPrefixOfAnInclude = true; - continue; - } - } - if (Regex.simpleMatch(include, path)) { - exactIncludeMatch = true; - break; - } + + // leaf property + if (includeAutomaton.isAccept(includeState) + && (excludeState == -1 || excludeAutomaton.isAccept(excludeState) == false)) { + filtered.put(key, value); } + } - if (!(pathIsPrefixOfAnInclude || exactIncludeMatch)) { - // skip subkeys, not interesting. - sb.setLength(mark); - continue; - } - - - if (entry.getValue() instanceof Map) { - Map innerInto = new HashMap<>(); - // if we had an exact match, we want give deeper excludes their chance - filter((Map) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb); - if (exactIncludeMatch || !innerInto.isEmpty()) { - into.put(entry.getKey(), innerInto); - } - } else if (entry.getValue() instanceof List) { - List list = (List) entry.getValue(); - List innerInto = new ArrayList<>(list.size()); - // if we had an exact match, we want give deeper excludes their chance - filter(list, innerInto, exactIncludeMatch ? 
Strings.EMPTY_ARRAY : includes, excludes, sb); - into.put(entry.getKey(), innerInto); - } else if (exactIncludeMatch) { - into.put(entry.getKey(), entry.getValue()); - } - sb.setLength(mark); } + return filtered; } - private static void filter(List from, List to, String[] includes, String[] excludes, StringBuilder sb) { - if (includes.length == 0 && excludes.length == 0) { - to.addAll(from); - return; - } - - for (Object o : from) { - if (o instanceof Map) { - Map innerInto = new HashMap<>(); - filter((Map) o, innerInto, includes, excludes, sb); - if (!innerInto.isEmpty()) { - to.add(innerInto); + private static List filter(Iterable iterable, + CharacterRunAutomaton includeAutomaton, int initialIncludeState, + CharacterRunAutomaton excludeAutomaton, int initialExcludeState, + CharacterRunAutomaton matchAllAutomaton) { + List filtered = new ArrayList<>(); + for (Object value : iterable) { + if (value instanceof Map) { + int includeState = includeAutomaton.step(initialIncludeState, '.'); + int excludeState = initialExcludeState; + if (excludeState != -1) { + excludeState = excludeAutomaton.step(excludeState, '.'); } - } else if (o instanceof List) { - List innerInto = new ArrayList<>(); - filter((List) o, innerInto, includes, excludes, sb); - if (!innerInto.isEmpty()) { - to.add(innerInto); + Map filteredValue = filter((Map)value, + includeAutomaton, includeState, excludeAutomaton, excludeState, matchAllAutomaton); + if (filteredValue.isEmpty() == false) { + filtered.add(filteredValue); + } + } else if (value instanceof Iterable) { + List filteredValue = filter((Iterable) value, + includeAutomaton, initialIncludeState, excludeAutomaton, initialExcludeState, matchAllAutomaton); + if (filteredValue.isEmpty() == false) { + filtered.add(filteredValue); } } else { - to.add(o); + // TODO: we have tests relying on this behavior on arrays even + // if the path does not match, but this looks like a bug? 
+ filtered.add(value); } } + return filtered; } public static boolean isObject(Object node) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java index 3b674c054dc..5efceac7dcf 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java @@ -23,9 +23,6 @@ import com.fasterxml.jackson.core.JsonParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContentParser; -/** - * - */ public class YamlXContentParser extends JsonXContentParser { public YamlXContentParser(JsonParser parser) { diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index b41316b6534..df68a9fe648 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -19,119 +19,93 @@ package org.elasticsearch.discovery; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.ExtensionPoint; -import org.elasticsearch.discovery.local.LocalDiscovery; -import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; - -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.function.Function; +import java.util.function.Supplier; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; /** * A module for loading classes for node discovery. 
 */
-public class DiscoveryModule extends AbstractModule {
+public class DiscoveryModule {
 
     public static final Setting<String> DISCOVERY_TYPE_SETTING =
-        new Setting<>("discovery.type", "zen", Function.identity(),
-            Property.NodeScope);
-    public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING =
-        new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope);
+        new Setting<>("discovery.type", "zen", Function.identity(), Property.NodeScope);
+    public static final Setting<Optional<String>> DISCOVERY_HOSTS_PROVIDER_SETTING =
+        new Setting<>("discovery.zen.hosts_provider", (String)null, Optional::ofNullable, Property.NodeScope);
 
-    private final Settings settings;
-    private final Map<String, List<Class<? extends UnicastHostsProvider>>> unicastHostProviders = new HashMap<>();
-    private final ExtensionPoint.ClassSet<ZenPing> zenPings = new ExtensionPoint.ClassSet<>("zen_ping", ZenPing.class);
-    private final Map<String, Class<? extends Discovery>> discoveryTypes = new HashMap<>();
-    private final Map<String, Class<? extends ElectMasterService>> masterServiceType = new HashMap<>();
+    private final Discovery discovery;
+    private final ZenPing zenPing;
 
-    public DiscoveryModule(Settings settings) {
-        this.settings = settings;
-        addDiscoveryType("local", LocalDiscovery.class);
-        addDiscoveryType("zen", ZenDiscovery.class);
-        addElectMasterService("zen", ElectMasterService.class);
-        // always add the unicast hosts, or things get angry!
-        addZenPing(UnicastZenPing.class);
-    }
+    public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NetworkService networkService,
+                           ClusterService clusterService, Function<UnicastHostsProvider, ZenPing> createZenPing,
+                           List<DiscoveryPlugin> plugins) {
+        final UnicastHostsProvider hostsProvider;
 
-    /**
-     * Adds a custom unicast hosts provider to build a dynamic list of unicast hosts list when doing unicast discovery.
-     *
-     * @param type discovery for which this provider is relevant
-     * @param unicastHostProvider the host provider
-     */
-    public void addUnicastHostProvider(String type, Class<? extends UnicastHostsProvider> unicastHostProvider) {
-        List<Class<? extends UnicastHostsProvider>> providerList = unicastHostProviders.get(type);
-        if (providerList == null) {
-            providerList = new ArrayList<>();
-            unicastHostProviders.put(type, providerList);
+        Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
+        for (DiscoveryPlugin plugin : plugins) {
+            plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> {
+                if (hostProviders.put(entry.getKey(), entry.getValue()) != null) {
+                    throw new IllegalArgumentException("Cannot register zen hosts provider [" + entry.getKey() + "] twice");
+                }
+            });
         }
-        providerList.add(unicastHostProvider);
-    }
-
-    /**
-     * Adds a custom Discovery type.
-     */
-    public void addDiscoveryType(String type, Class<? extends Discovery> clazz) {
-        if (discoveryTypes.containsKey(type)) {
-            throw new IllegalArgumentException("discovery type [" + type + "] is already registered");
+        Optional<String> hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings);
+        if (hostsProviderName.isPresent()) {
+            Supplier<UnicastHostsProvider> hostsProviderSupplier = hostProviders.get(hostsProviderName.get());
+            if (hostsProviderSupplier == null) {
+                throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName.get() + "]");
+            }
+            hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get());
+        } else {
+            hostsProvider = Collections::emptyList;
         }
-        discoveryTypes.put(type, clazz);
-    }
 
-    /**
-     * Adds a custom zen master service type.
-     */
-    public void addElectMasterService(String type, Class<? extends ElectMasterService> masterService) {
-        if (masterServiceType.containsKey(type)) {
-            throw new IllegalArgumentException("master service type [" + type + "] is already registered");
+        zenPing = createZenPing.apply(hostsProvider);
+
+        Map<String, Supplier<Discovery>> discoveryTypes = new HashMap<>();
+        discoveryTypes.put("zen",
+            () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing));
+        discoveryTypes.put("none", () -> new NoneDiscovery(settings, clusterService, clusterService.getClusterSettings()));
+        for (DiscoveryPlugin plugin : plugins) {
+            plugin.getDiscoveryTypes(threadPool, transportService, clusterService, zenPing).entrySet().forEach(entry -> {
+                if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) {
+                    throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice");
+                }
+            });
         }
-        this.masterServiceType.put(type, masterService);
-    }
-
-    public void addZenPing(Class<? extends ZenPing> clazz) {
-        zenPings.registerExtension(clazz);
-    }
-
-    @Override
-    protected void configure() {
         String discoveryType = DISCOVERY_TYPE_SETTING.get(settings);
-        Class<? extends Discovery> discoveryClass = discoveryTypes.get(discoveryType);
-        if (discoveryClass == null) {
-            throw new IllegalArgumentException("Unknown Discovery type [" + discoveryType + "]");
+        Supplier<Discovery> discoverySupplier = discoveryTypes.get(discoveryType);
+        if (discoverySupplier == null) {
+            throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
         }
+        discovery = Objects.requireNonNull(discoverySupplier.get());
+    }
 
-        if (discoveryType.equals("local") == false) {
-            String masterServiceTypeKey = ZEN_MASTER_SERVICE_TYPE_SETTING.get(settings);
-            final Class<? extends ElectMasterService> masterService = masterServiceType.get(masterServiceTypeKey);
-            if (masterService == null) {
-                throw new IllegalArgumentException("Unknown master service type [" + masterServiceTypeKey + "]");
-            }
-            if (masterService == ElectMasterService.class) {
-                bind(ElectMasterService.class).asEagerSingleton();
-            } else {
-                bind(ElectMasterService.class).to(masterService).asEagerSingleton();
-            }
-            bind(ZenPingService.class).asEagerSingleton();
-            Multibinder<UnicastHostsProvider> unicastHostsProviderMultibinder = Multibinder.newSetBinder(binder(), UnicastHostsProvider.class);
-            for (Class<? extends UnicastHostsProvider> unicastHostProvider :
-                    unicastHostProviders.getOrDefault(discoveryType, Collections.emptyList())) {
-                unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider);
-            }
-            zenPings.bind(binder());
-        }
-        bind(Discovery.class).to(discoveryClass).asEagerSingleton();
+    public Discovery getDiscovery() {
+        return discovery;
+    }
+
+    // TODO: remove this, it should be completely local to discovery, but service disruption tests want to mess with it
+    public ZenPing getZenPing() {
+        return zenPing;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java
index fc419ff06a6..9542b14e569 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java
@@ -25,7 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
+import org.elasticsearch.discovery.zen.PendingClusterStateStats;
 
 import java.io.IOException;
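To make the new extension surface concrete, here is a minimal plugin-side sketch (hypothetical plugin and provider names; the hook signature is taken from the DiscoveryModule changes above) that registers a custom unicast hosts provider, replacing the old addUnicastHostProvider/ExtensionPoint path:

import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportService;

public class ExampleDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
    @Override
    public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(
            TransportService transportService, NetworkService networkService) {
        // selected when a node is started with: discovery.zen.hosts_provider: example
        // (returns an empty host list, mirroring the default used by DiscoveryModule itself)
        return Collections.singletonMap("example", () -> Collections::emptyList);
    }
}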
diff --git a/core/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java b/core/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java
index 160915a6e10..282d849debd 100644
--- a/core/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java
+++ b/core/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java
@@ -25,9 +25,6 @@ import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 
-/**
- *
- */
 public class MasterNotDiscoveredException extends ElasticsearchException {
 
     public MasterNotDiscoveredException() {
diff --git a/core/src/main/java/org/elasticsearch/discovery/NoneDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/NoneDiscovery.java
new file mode 100644
index 00000000000..91b04ce396b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/discovery/NoneDiscovery.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.ElectMasterService;
+
+/**
+ * A {@link Discovery} implementation that is used by {@link org.elasticsearch.tribe.TribeService}. This implementation
+ * doesn't support any clustering features. Most notably {@link #startInitialJoin()} does nothing and
+ * {@link #publish(ClusterChangedEvent, AckListener)} is not supported.
+ */
+public class NoneDiscovery extends AbstractLifecycleComponent implements Discovery {
+
+    private final ClusterService clusterService;
+    private final DiscoverySettings discoverySettings;
+
+    @Inject
+    public NoneDiscovery(Settings settings, ClusterService clusterService, ClusterSettings clusterSettings) {
+        super(settings);
+        this.clusterService = clusterService;
+        this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
+    }
+
+    @Override
+    public DiscoveryNode localNode() {
+        return clusterService.localNode();
+    }
+
+    @Override
+    public String nodeDescription() {
+        return clusterService.getClusterName().value() + "/" + clusterService.localNode().getId();
+    }
+
+    @Override
+    public void setAllocationService(AllocationService allocationService) {
+
+    }
+
+    @Override
+    public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public DiscoveryStats stats() {
+        return null;
+    }
+
+    @Override
+    public DiscoverySettings getDiscoverySettings() {
+        return discoverySettings;
+    }
+
+    @Override
+    public void startInitialJoin() {
+
+    }
+
+    @Override
+    public int getMinimumMasterNodes() {
+        return ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
+    }
+
+    @Override
+    protected void doStart() {
+
+    }
+
+    @Override
+    protected void doStop() {
+
+    }
+
+    @Override
+    protected void doClose() {
+
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
deleted file mode 100644
index 6b943bde78b..00000000000
--- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.discovery.local; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.Diff; -import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler; -import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; - -import java.util.HashSet; -import java.util.Optional; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; - -import static org.elasticsearch.cluster.ClusterState.Builder; - -/** - * - */ -public class LocalDiscovery extends AbstractLifecycleComponent implements Discovery { - - private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0]; - - private final ClusterService clusterService; - private AllocationService allocationService; - private final ClusterName clusterName; - - private final DiscoverySettings discoverySettings; - - private volatile boolean master = false; - - private static final ConcurrentMap clusterGroups = ConcurrentCollections.newConcurrentMap(); - - private volatile ClusterState lastProcessedClusterState; - - @Inject - public LocalDiscovery(Settings settings, ClusterService clusterService, ClusterSettings clusterSettings) { - super(settings); - this.clusterName = clusterService.getClusterName(); - this.clusterService = clusterService; - this.discoverySettings = new DiscoverySettings(settings, clusterSettings); - } - - @Override - public void setAllocationService(AllocationService allocationService) { - this.allocationService = allocationService; - } - - @Override - protected void doStart() { - - } - - @Override - public void startInitialJoin() { - synchronized (clusterGroups) { - ClusterGroup clusterGroup = clusterGroups.get(clusterName); - if (clusterGroup == null) { - clusterGroup = new ClusterGroup(); - clusterGroups.put(clusterName, clusterGroup); - } - logger.debug("Connected to cluster [{}]", clusterName); - - Optional current = clusterGroup.members().stream().filter(other -> ( - other.localNode().equals(this.localNode()) || other.localNode().getId().equals(this.localNode().getId()) - )).findFirst(); - if (current.isPresent()) { - throw new 
IllegalStateException("current cluster group already contains a node with the same id. current " - + current.get().localNode() + ", this node " + localNode()); - } - - clusterGroup.members().add(this); - - LocalDiscovery firstMaster = null; - for (LocalDiscovery localDiscovery : clusterGroup.members()) { - if (localDiscovery.localNode().isMasterNode()) { - firstMaster = localDiscovery; - break; - } - } - - if (firstMaster != null && firstMaster.equals(this)) { - // we are the first master (and the master) - master = true; - final LocalDiscovery master = firstMaster; - clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ClusterStateUpdateTask() { - - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); - for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.add(discovery.localNode()); - } - nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); - // remove the NO_MASTER block in this case - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(discoverySettings.getNoMasterBlock()); - return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - } - }); - } else if (firstMaster != null) { - // tell the master to send the fact that we are here - final LocalDiscovery master = firstMaster; - firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode() + "])", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); - for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.add(discovery.localNode()); - } - nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); - currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build(); - return master.allocationService.reroute(currentState, "node_add"); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - } - - }); - } - } // else, no master node, the next node that will start will fill things in... 
- } - - @Override - protected void doStop() { - synchronized (clusterGroups) { - ClusterGroup clusterGroup = clusterGroups.get(clusterName); - if (clusterGroup == null) { - logger.warn("Illegal state, should not have an empty cluster group when stopping, I should be there at teh very least..."); - return; - } - clusterGroup.members().remove(this); - if (clusterGroup.members().isEmpty()) { - // no more members, remove and return - clusterGroups.remove(clusterName); - return; - } - - LocalDiscovery firstMaster = null; - for (LocalDiscovery localDiscovery : clusterGroup.members()) { - if (localDiscovery.localNode().isMasterNode()) { - firstMaster = localDiscovery; - break; - } - } - - if (firstMaster != null) { - // if the removed node is the master, make the next one as the master - if (master) { - firstMaster.master = true; - } - - final Set newMembers = new HashSet<>(); - for (LocalDiscovery discovery : clusterGroup.members()) { - newMembers.add(discovery.localNode().getId()); - } - - final LocalDiscovery master = firstMaster; - master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().getId()); - DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes()); - if (delta.added()) { - logger.warn("No new nodes should be created when a new discovery view is accepted"); - } - // reroute here, so we eagerly remove dead nodes from the routing - ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build(); - return master.allocationService.deassociateDeadNodes(updatedState, true, "node stopped"); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - } - }); - } - } - } - - @Override - protected void doClose() { - } - - @Override - public DiscoveryNode localNode() { - return clusterService.localNode(); - } - - @Override - public String nodeDescription() { - return clusterName.value() + "/" + localNode().getId(); - } - - @Override - public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { - if (!master) { - throw new IllegalStateException("Shouldn't publish state when not master"); - } - LocalDiscovery[] members = members(); - if (members.length > 0) { - Set nodesToPublishTo = new HashSet<>(members.length); - for (LocalDiscovery localDiscovery : members) { - if (localDiscovery.master) { - continue; - } - nodesToPublishTo.add(localDiscovery.localNode()); - } - publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); - } - } - - @Override - public DiscoveryStats stats() { - return new DiscoveryStats((PendingClusterStateStats)null); - } - - @Override - public DiscoverySettings getDiscoverySettings() { - return discoverySettings; - } - - @Override - public int getMinimumMasterNodes() { - return -1; - } - - private LocalDiscovery[] members() { - ClusterGroup clusterGroup = clusterGroups.get(clusterName); - if (clusterGroup == null) { - return NO_MEMBERS; - } - Queue members = clusterGroup.members(); - return members.toArray(new LocalDiscovery[members.size()]); - } - - private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final 
BlockingClusterStatePublishResponseHandler publishResponseHandler) { - - try { - // we do the marshaling intentionally, to check it works well... - byte[] clusterStateBytes = null; - byte[] clusterStateDiffBytes = null; - - ClusterState clusterState = clusterChangedEvent.state(); - for (final LocalDiscovery discovery : members) { - if (discovery.master) { - continue; - } - ClusterState newNodeSpecificClusterState = null; - synchronized (this) { - // we do the marshaling intentionally, to check it works well... - // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time - if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode())) { - // both conditions are true - which means we can try sending cluster state as diffs - if (clusterStateDiffBytes == null) { - Diff diff = clusterState.diff(clusterChangedEvent.previousState()); - BytesStreamOutput os = new BytesStreamOutput(); - diff.writeTo(os); - clusterStateDiffBytes = BytesReference.toBytes(os.bytes()); - } - try { - newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); - logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName()); - } catch (IncompatibleClusterStateVersionException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("incompatible cluster state version [{}] - resending complete cluster state", clusterState.version()), ex); - } - } - if (newNodeSpecificClusterState == null) { - if (clusterStateBytes == null) { - clusterStateBytes = Builder.toBytes(clusterState); - } - newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode()); - } - discovery.lastProcessedClusterState = newNodeSpecificClusterState; - } - final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState; - - nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); - // ignore cluster state messages that do not include "me", not in the game yet... 
- if (nodeSpecificClusterState.nodes().getLocalNode() != null) { - assert nodeSpecificClusterState.nodes().getMasterNode() != null : "received a cluster state without a master"; - assert !nodeSpecificClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block"; - - discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) { - if (currentState.supersedes(nodeSpecificClusterState)) { - return currentState; - } - - if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) { - // its a fresh update from the master as we transition from a start of not having a master to having one - logger.debug("got first state from fresh master [{}]", nodeSpecificClusterState.nodes().getMasterNodeId()); - return nodeSpecificClusterState; - } - - ClusterState.Builder builder = ClusterState.builder(nodeSpecificClusterState); - // if the routing table did not change, use the original one - if (nodeSpecificClusterState.routingTable().version() == currentState.routingTable().version()) { - builder.routingTable(currentState.routingTable()); - } - if (nodeSpecificClusterState.metaData().version() == currentState.metaData().version()) { - builder.metaData(currentState.metaData()); - } - - return builder.build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - publishResponseHandler.onFailure(discovery.localNode(), e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - publishResponseHandler.onResponse(discovery.localNode()); - } - }); - } else { - publishResponseHandler.onResponse(discovery.localNode()); - } - } - - TimeValue publishTimeout = discoverySettings.getPublishTimeout(); - if (publishTimeout.millis() > 0) { - try { - boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout); - if (!awaited) { - DiscoveryNode[] pendingNodes = publishResponseHandler.pendingNodes(); - // everyone may have just responded - if (pendingNodes.length > 0) { - logger.warn("timed out waiting for all nodes to process published state [{}] (timeout [{}], pending nodes: {})", clusterState.version(), publishTimeout, pendingNodes); - } - } - } catch (InterruptedException e) { - // ignore & restore interrupt - Thread.currentThread().interrupt(); - } - } - - - } catch (Exception e) { - // failure to marshal or un-marshal - throw new IllegalStateException("Cluster state failed to serialize", e); - } - } - - private class ClusterGroup { - - private Queue members = ConcurrentCollections.newQueue(); - - Queue members() { - return members; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java index b9ce7901369..247839397e0 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java @@ -21,9 +21,6 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.cluster.node.DiscoveryNodes; -/** - * - */ public interface DiscoveryNodesProvider { DiscoveryNodes nodes(); diff --git 
a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 1d11f5cf0f5..7116597bdaf 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -38,9 +38,6 @@ import java.util.List; import java.util.Objects; import java.util.stream.Collectors; -/** - * - */ public class ElectMasterService extends AbstractComponent { public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = @@ -101,7 +98,6 @@ public class ElectMasterService extends AbstractComponent { } } - @Inject public ElectMasterService(Settings settings) { super(settings); this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); @@ -178,7 +174,7 @@ public class ElectMasterService extends AbstractComponent { * Returns the given nodes sorted by likelihood of being elected as master, most likely first. * Non-master nodes are not removed but are rather put in the end */ - public List sortByMasterLikelihood(Iterable nodes) { + public static List sortByMasterLikelihood(Iterable nodes) { ArrayList sortedNodes = CollectionUtils.iterableAsArrayList(nodes); CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes); return sortedNodes; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java similarity index 94% rename from core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java rename to core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java index 1cfd46634a5..715e8be03ef 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java @@ -16,7 +16,10 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.discovery.zen.fd; + +package org.elasticsearch.discovery.zen; + +import java.io.Closeable; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -32,10 +35,10 @@ import org.elasticsearch.transport.TransportService; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; /** - * A base class for {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection} & {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}, + * A base class for {@link MasterFaultDetection} & {@link NodesFaultDetection}, * making sure both use the same setting. 
*/ -public abstract class FaultDetection extends AbstractComponent { +public abstract class FaultDetection extends AbstractComponent implements Closeable { public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); @@ -79,6 +82,7 @@ public abstract class FaultDetection extends AbstractComponent { } } + @Override public void close() { transportService.removeConnectionListener(connectionListener); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java similarity index 99% rename from core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java rename to core/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java index 04aee9db3d8..b7acfb685de 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.zen.fd; +package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java similarity index 87% rename from core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java rename to core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index 8740d12c5f7..7ff8f935927 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.discovery.zen.membership; +package org.elasticsearch.discovery.zen; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -38,9 +38,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.concurrent.TimeUnit; -/** - * - */ public class MembershipAction extends AbstractComponent { public static final String DISCOVERY_JOIN_ACTION_NAME = "internal:discovery/zen/join"; @@ -65,36 +62,42 @@ public class MembershipAction extends AbstractComponent { private final MembershipListener listener; - public MembershipAction(Settings settings, TransportService transportService, DiscoveryNodesProvider nodesProvider, MembershipListener listener) { + public MembershipAction(Settings settings, TransportService transportService, + DiscoveryNodesProvider nodesProvider, MembershipListener listener) { super(settings); this.transportService = transportService; this.nodesProvider = nodesProvider; this.listener = listener; - transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new, ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); - transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, ValidateJoinRequest::new, ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler()); - transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new, + ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, ValidateJoinRequest::new, + ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, + ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); } public void sendLeaveRequest(DiscoveryNode masterNode, DiscoveryNode node) { - transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME); + transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), + EmptyTransportResponseHandler.INSTANCE_SAME); } public void sendLeaveRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { - transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); + transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { - transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME) - .txGet(timeout.millis(), TimeUnit.MILLISECONDS); + transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } /** * Validates the join request, throwing a failure if it failed. 
*/ public void sendValidateJoinRequestBlocking(DiscoveryNode node, ClusterState state, TimeValue timeout) { - transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(state), EmptyTransportResponseHandler.INSTANCE_SAME) - .txGet(timeout.millis(), TimeUnit.MILLISECONDS); + transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(state), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } public static class JoinRequest extends TransportRequest { diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 75cce695357..6d77e2f48fe 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -37,10 +37,9 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.membership.MembershipAction; import java.util.ArrayList; import java.util.Collections; @@ -385,7 +384,8 @@ public class NodeJoinController extends AbstractComponent { /** * a task indicated that the current node should become master, if no current master is known */ - private static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", LocalTransportAddress.buildUnique(), + private static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", + new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { @Override public String toString() { @@ -398,7 +398,7 @@ public class NodeJoinController extends AbstractComponent { * it may be use in combination with {@link #BECOME_MASTER_TASK} */ private static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_", - LocalTransportAddress.buildUnique(), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { + new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { @Override public String toString() { return ""; // this is not really task , so don't log anything about it... 
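A side note on the BECOME_MASTER_TASK and FINISH_ELECTION_TASK placeholders in the hunk above: with LocalTransportAddress removed, a synthetic task marker is just an ordinary DiscoveryNode pinned to the reserved meta address, so it can never be dialed and is identified only by its id. A stand-alone sketch of the same pattern; the MarkerNodes class and the id argument are invented here, while META_ADDRESS and the constructor shape are taken from the diff:

import java.util.Collections;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.transport.TransportAddress;

public final class MarkerNodes {
    private MarkerNodes() {}

    // Builds a non-routable placeholder node, in the same way NodeJoinController
    // builds its election marker tasks: reserved meta address, port 0, no attributes or roles.
    public static DiscoveryNode marker(String id) {
        return new DiscoveryNode(id, new TransportAddress(TransportAddress.META_ADDRESS, 0),
            Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
    }
}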
@@ -464,7 +464,7 @@ public class NodeJoinController extends AbstractComponent { } private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { - assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint(); + assert currentState.nodes().getMasterNodeId() == null : currentState; DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java similarity index 99% rename from core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java rename to core/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 6361d3cde39..5cd02a52504 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.zen.fd; +package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java b/core/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStateStats.java similarity index 98% rename from core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java rename to core/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStateStats.java index e060f688338..8facf2f282c 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStateStats.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStateStats.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.zen.publish; +package org.elasticsearch.discovery.zen; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java b/core/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java similarity index 88% rename from core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java rename to core/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java index 01fb96b7133..018258066de 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueue.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java @@ -16,7 +16,8 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.discovery.zen.publish; + +package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -34,10 +35,12 @@ import java.util.Objects; *

    * The queue is bound by {@link #maxQueueSize}. When the queue is at capacity and a new cluster state is inserted * the oldest cluster state will be dropped. This is safe because: - * 1) Under normal operations, master will publish & commit a cluster state before processing another change (i.e., the queue length is 1) + * 1) Under normal operations, master will publish & commit a cluster state before processing + * another change (i.e., the queue length is 1) * 2) If the master fails to commit a change, it will step down, causing a master election, which will flush the queue. * 3) In general it's safe to process the incoming cluster state as a replacement to the cluster state that's dropped. - * a) If the dropped cluster is from the same master as the incoming one is, it is likely to be superseded by the incoming state (or another state in the queue). + * a) If the dropped cluster is from the same master as the incoming one is, it is likely to be superseded by the + * incoming state (or another state in the queue). * This is only not true in very extreme cases of out of order delivery. * b) If the dropping cluster state is not from the same master, it means that: * i) we are no longer following the master of the dropped cluster state but follow the incoming one @@ -70,7 +73,8 @@ public class PendingClusterStatesQueue { ClusterStateContext context = pendingStates.remove(0); logger.warn("dropping pending state [{}]. more than [{}] pending states.", context, maxQueueSize); if (context.committed()) { - context.listener.onNewClusterStateFailed(new ElasticsearchException("too many pending states ([{}] pending)", maxQueueSize)); + context.listener.onNewClusterStateFailed(new ElasticsearchException("too many pending states ([{}] pending)", + maxQueueSize)); } } } @@ -82,11 +86,13 @@ public class PendingClusterStatesQueue { public synchronized ClusterState markAsCommitted(String stateUUID, StateProcessedListener listener) { final ClusterStateContext context = findState(stateUUID); if (context == null) { - listener.onNewClusterStateFailed(new IllegalStateException("can't resolve cluster state with uuid [" + stateUUID + "] to commit")); + listener.onNewClusterStateFailed(new IllegalStateException("can't resolve cluster state with uuid" + + " [" + stateUUID + "] to commit")); return null; } if (context.committed()) { - listener.onNewClusterStateFailed(new IllegalStateException("cluster state with uuid [" + stateUUID + "] is already committed")); + listener.onNewClusterStateFailed(new IllegalStateException("cluster state with uuid" + + " [" + stateUUID + "] is already committed")); return null; } context.markAsCommitted(listener); @@ -94,13 +100,14 @@ public class PendingClusterStatesQueue { } /** - * mark that the processing of the given state has failed. All committed states that are {@link ClusterState#supersedes(ClusterState)}-ed - * by this failed state, will be failed as well + * mark that the processing of the given state has failed. 
All committed states that are + * {@link ClusterState#supersedes(ClusterState)}-ed by this failed state, will be failed as well */ public synchronized void markAsFailed(ClusterState state, Exception reason) { final ClusterStateContext failedContext = findState(state.stateUUID()); if (failedContext == null) { - throw new IllegalArgumentException("can't resolve failed cluster state with uuid [" + state.stateUUID() + "], version [" + state.version() + "]"); + throw new IllegalArgumentException("can't resolve failed cluster state with uuid [" + state.stateUUID() + + "], version [" + state.version() + "]"); } if (failedContext.committed() == false) { throw new IllegalArgumentException("failed cluster state is not committed " + state); @@ -128,15 +135,16 @@ public class PendingClusterStatesQueue { } /** - * indicates that a cluster state was successfully processed. Any committed state that is {@link ClusterState#supersedes(ClusterState)}-ed - * by the processed state will be marked as processed as well. + * indicates that a cluster state was successfully processed. Any committed state that is + * {@link ClusterState#supersedes(ClusterState)}-ed by the processed state will be marked as processed as well. *

    - * NOTE: successfully processing a state indicates we are following the master it came from. Any committed state from another master will - * be failed by this method + * NOTE: successfully processing a state indicates we are following the master it came from. Any committed state + * from another master will be failed by this method */ public synchronized void markAsProcessed(ClusterState state) { if (findState(state.stateUUID()) == null) { - throw new IllegalStateException("can't resolve processed cluster state with uuid [" + state.stateUUID() + "], version [" + state.version() + "]"); + throw new IllegalStateException("can't resolve processed cluster state with uuid [" + state.stateUUID() + + "], version [" + state.version() + "]"); } final DiscoveryNode currentMaster = state.nodes().getMasterNode(); assert currentMaster != null : "processed cluster state mast have a master. " + state; @@ -152,17 +160,16 @@ public class PendingClusterStatesQueue { contextsToRemove.add(pendingContext); if (pendingContext.committed()) { // this is a committed state , warn - logger.warn("received a cluster state (uuid[{}]/v[{}]) from a different master than the current one, rejecting (received {}, current {})", - pendingState.stateUUID(), pendingState.version(), - pendingMasterNode, currentMaster); + logger.warn("received a cluster state (uuid[{}]/v[{}]) from a different master than the current one," + + " rejecting (received {}, current {})", + pendingState.stateUUID(), pendingState.version(), pendingMasterNode, currentMaster); pendingContext.listener.onNewClusterStateFailed( - new IllegalStateException("cluster state from a different master than the current one, rejecting (received " + pendingMasterNode + ", current " + currentMaster + ")") - ); + new IllegalStateException("cluster state from a different master than the current one," + + " rejecting (received " + pendingMasterNode + ", current " + currentMaster + ")")); } else { - logger.trace("removing non-committed state with uuid[{}]/v[{}] from [{}] - a state from [{}] was successfully processed", - pendingState.stateUUID(), pendingState.version(), pendingMasterNode, - currentMaster - ); + logger.trace("removing non-committed state with uuid[{}]/v[{}] from [{}] - a state from" + + " [{}] was successfully processed", + pendingState.stateUUID(), pendingState.version(), pendingMasterNode, currentMaster); } } else if (pendingState.stateUUID().equals(state.stateUUID())) { assert pendingContext.committed() : "processed cluster state is not committed " + state; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/PingContextProvider.java similarity index 94% rename from core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java rename to core/src/main/java/org/elasticsearch/discovery/zen/PingContextProvider.java index 0bcc8b37d88..b705c918392 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/PingContextProvider.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PingContextProvider.java @@ -17,14 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.discovery.zen.ping; +package org.elasticsearch.discovery.zen; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; -/** - * - */ public interface PingContextProvider extends DiscoveryNodesProvider { /** return the current cluster state of the node */ diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java similarity index 90% rename from core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java rename to core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 870e34cc1f3..58ba7bb177e 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.zen.publish; +package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterStateStatus; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; @@ -42,7 +43,6 @@ import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler; import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.EmptyTransportResponseHandler; @@ -66,9 +66,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; -/** - * - */ public class PublishClusterStateAction extends AbstractComponent { public static final String SEND_ACTION_NAME = "internal:discovery/zen/publish/send"; @@ -103,8 +100,10 @@ public class PublishClusterStateAction extends AbstractComponent { this.discoverySettings = discoverySettings; this.clusterName = clusterName; this.pendingStatesQueue = new PendingClusterStatesQueue(logger, settings.getAsInt(SETTINGS_MAX_PENDING_CLUSTER_STATES, 25)); - transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, new SendClusterStateRequestHandler()); - transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, new CommitClusterStateRequestHandler()); + transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, false, false, + new SendClusterStateRequestHandler()); + transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, false, false, + new CommitClusterStateRequestHandler()); } public PendingClusterStatesQueue pendingStatesQueue() { @@ -115,10 +114,12 @@ public class PublishClusterStateAction extends 
AbstractComponent { * publishes a cluster change event to other nodes. if at least minMasterNodes acknowledge the change it is committed and will * be processed by the master and the other nodes. *

    - * The method is guaranteed to throw a {@link org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException} if the change is not committed and should be rejected. + * The method is guaranteed to throw a {@link org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException} + * if the change is not committed and should be rejected. * Any other exception signals the something wrong happened but the change is committed. */ - public void publish(final ClusterChangedEvent clusterChangedEvent, final int minMasterNodes, final Discovery.AckListener ackListener) throws Discovery.FailedToCommitClusterStateException { + public void publish(final ClusterChangedEvent clusterChangedEvent, final int minMasterNodes, + final Discovery.AckListener ackListener) throws Discovery.FailedToCommitClusterStateException { final DiscoveryNodes nodes; final SendingController sendingController; final Set nodesToPublishTo; @@ -146,8 +147,10 @@ public class PublishClusterStateAction extends AbstractComponent { buildDiffAndSerializeStates(clusterChangedEvent.state(), clusterChangedEvent.previousState(), nodesToPublishTo, sendFullVersion, serializedStates, serializedDiffs); - final BlockingClusterStatePublishResponseHandler publishResponseHandler = new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener); - sendingController = new SendingController(clusterChangedEvent.state(), minMasterNodes, totalMasterNodes, publishResponseHandler); + final BlockingClusterStatePublishResponseHandler publishResponseHandler = + new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener); + sendingController = new SendingController(clusterChangedEvent.state(), minMasterNodes, + totalMasterNodes, publishResponseHandler); } catch (Exception e) { throw new Discovery.FailedToCommitClusterStateException("unexpected error while preparing to publish", e); } @@ -198,7 +201,8 @@ public class PublishClusterStateAction extends AbstractComponent { DiscoveryNode[] pendingNodes = publishResponseHandler.pendingNodes(); // everyone may have just responded if (pendingNodes.length > 0) { - logger.warn("timed out waiting for all nodes to process published state [{}] (timeout [{}], pending nodes: {})", clusterState.version(), publishTimeout, pendingNodes); + logger.warn("timed out waiting for all nodes to process published state [{}] (timeout [{}], pending nodes: {})", + clusterState.version(), publishTimeout, pendingNodes); } } } catch (InterruptedException e) { @@ -208,7 +212,8 @@ public class PublishClusterStateAction extends AbstractComponent { } private void buildDiffAndSerializeStates(ClusterState clusterState, ClusterState previousState, Set nodesToPublishTo, - boolean sendFullVersion, Map serializedStates, Map serializedDiffs) { + boolean sendFullVersion, Map serializedStates, + Map serializedDiffs) { Diff diff = null; for (final DiscoveryNode node : nodesToPublishTo) { try { @@ -241,7 +246,8 @@ public class PublishClusterStateAction extends AbstractComponent { serializedStates.put(node.getVersion(), bytes); } catch (Exception e) { logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); + (org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); sendingController.onNodeSendFailed(node, e); return; } @@ -267,7 +273,8 @@ public class PublishClusterStateAction extends AbstractComponent { // -> 
no need to put a timeout on the options here, because we want the response to eventually be received // and not log an error if it arrives after the timeout // -> no need to compress, we already compressed the bytes - TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE).withCompress(false).build(); + TransportRequestOptions options = TransportRequestOptions.builder() + .withType(TransportRequestOptions.Type.STATE).withCompress(false).build(); transportService.sendRequest(node, SEND_ACTION_NAME, new BytesTransportRequest(bytes, node.getVersion()), options, @@ -276,7 +283,8 @@ public class PublishClusterStateAction extends AbstractComponent { @Override public void handleResponse(TransportResponse.Empty response) { if (sendingController.getPublishingTimedOut()) { - logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); + logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, + clusterState.version(), publishTimeout); } sendingController.onNodeSendAck(node); } @@ -287,21 +295,24 @@ public class PublishClusterStateAction extends AbstractComponent { logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController); } else { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to send cluster state to {}", node), exp); + logger.debug((org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("failed to send cluster state to {}", node), exp); sendingController.onNodeSendFailed(node, exp); } } }); } catch (Exception e) { logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("error sending cluster state to {}", node), e); + (org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("error sending cluster state to {}", node), e); sendingController.onNodeSendFailed(node, e); } } private void sendCommitToNode(final DiscoveryNode node, final ClusterState clusterState, final SendingController sendingController) { try { - logger.trace("sending commit for cluster state (uuid: [{}], version [{}]) to [{}]", clusterState.stateUUID(), clusterState.version(), node); + logger.trace("sending commit for cluster state (uuid: [{}], version [{}]) to [{}]", + clusterState.stateUUID(), clusterState.version(), node); TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE).build(); // no need to put a timeout on the options here, because we want the response to eventually be received // and not log an error if it arrives after the timeout @@ -320,12 +331,16 @@ public class PublishClusterStateAction extends AbstractComponent { @Override public void handleException(TransportException exp) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp); + logger.debug((org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", + clusterState.stateUUID(), clusterState.version(), node), exp); sendingController.getPublishResponseHandler().onFailure(node, exp); } }); } catch (Exception t) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> new 
ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t); + logger.warn((org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", + clusterState.stateUUID(), clusterState.version(), node), t); sendingController.getPublishResponseHandler().onFailure(node, t); } } @@ -372,7 +387,8 @@ public class PublishClusterStateAction extends AbstractComponent { } else if (lastSeenClusterState != null) { Diff diff = lastSeenClusterState.readDiffFrom(in); incomingState = diff.apply(lastSeenClusterState); - logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", incomingState.version(), incomingState.stateUUID(), request.bytes().length()); + logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", + incomingState.version(), incomingState.stateUUID(), request.bytes().length()); } else { logger.debug("received diff for but don't have any local cluster state - requesting full state"); throw new IncompatibleClusterStateVersionException("have no local cluster state"); @@ -382,7 +398,6 @@ public class PublishClusterStateAction extends AbstractComponent { pendingStatesQueue.addPending(incomingState); lastSeenClusterState = incomingState; - lastSeenClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -395,13 +410,15 @@ public class PublishClusterStateAction extends AbstractComponent { void validateIncomingState(ClusterState incomingState, ClusterState lastSeenClusterState) { final ClusterName incomingClusterName = incomingState.getClusterName(); if (!incomingClusterName.equals(this.clusterName)) { - logger.warn("received cluster state from [{}] which is also master but with a different cluster name [{}]", incomingState.nodes().getMasterNode(), incomingClusterName); + logger.warn("received cluster state from [{}] which is also master but with a different cluster name [{}]", + incomingState.nodes().getMasterNode(), incomingClusterName); throw new IllegalStateException("received state from a node that is not part of the cluster"); } final ClusterState clusterState = clusterStateSupplier.get(); if (clusterState.nodes().getLocalNode().equals(incomingState.nodes().getLocalNode()) == false) { - logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen", incomingState.nodes().getMasterNode()); + logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen", + incomingState.nodes().getMasterNode()); throw new IllegalStateException("received state with a local node that does not match the current local node"); } @@ -420,7 +437,8 @@ public class PublishClusterStateAction extends AbstractComponent { } protected void handleCommitRequest(CommitClusterStateRequest request, final TransportChannel channel) { - final ClusterState state = pendingStatesQueue.markAsCommitted(request.stateUUID, new PendingClusterStatesQueue.StateProcessedListener() { + final ClusterState state = pendingStatesQueue.markAsCommitted(request.stateUUID, + new PendingClusterStatesQueue.StateProcessedListener() { @Override public void onNewClusterStateProcessed() { try { @@ -443,7 +461,8 @@ public class PublishClusterStateAction extends AbstractComponent { } }); if (state != null) { - newPendingClusterStatelistener.onNewClusterState("master " + state.nodes().getMasterNode() + " 
committed version [" + state.version() + "]"); + newPendingClusterStatelistener.onNewClusterState("master " + state.nodes().getMasterNode() + + " committed version [" + state.version() + "]"); } } @@ -513,13 +532,15 @@ public class PublishClusterStateAction extends AbstractComponent { // an external marker to note that the publishing process is timed out. This is useful for proper logging. final AtomicBoolean publishingTimedOut = new AtomicBoolean(); - private SendingController(ClusterState clusterState, int minMasterNodes, int totalMasterNodes, BlockingClusterStatePublishResponseHandler publishResponseHandler) { + private SendingController(ClusterState clusterState, int minMasterNodes, int totalMasterNodes, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { this.clusterState = clusterState; this.publishResponseHandler = publishResponseHandler; this.neededMastersToCommit = Math.max(0, minMasterNodes - 1); // we are one of the master nodes this.pendingMasterNodes = totalMasterNodes - 1; if (this.neededMastersToCommit > this.pendingMasterNodes) { - throw new Discovery.FailedToCommitClusterStateException("not enough masters to ack sent cluster state. [{}] needed , have [{}]", neededMastersToCommit, pendingMasterNodes); + throw new Discovery.FailedToCommitClusterStateException("not enough masters to ack sent cluster state." + + "[{}] needed , have [{}]", neededMastersToCommit, pendingMasterNodes); } this.committed = neededMastersToCommit == 0; this.committedOrFailedLatch = new CountDownLatch(committed ? 0 : 1); @@ -593,7 +614,8 @@ public class PublishClusterStateAction extends AbstractComponent { public synchronized void onNodeSendFailed(DiscoveryNode node, Exception e) { if (node.isMasterNode()) { - logger.trace("master node {} failed to ack cluster state version [{}]. processing ... (current pending [{}], needed [{}])", + logger.trace("master node {} failed to ack cluster state version [{}]. " + + "processing ... (current pending [{}], needed [{}])", node, clusterState.version(), pendingMasterNodes, neededMastersToCommit); decrementPendingMasterAcksAndChangeForFailure(); } @@ -624,7 +646,8 @@ public class PublishClusterStateAction extends AbstractComponent { if (committedOrFailed()) { return committed == false; } - logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason); + logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit version [{}]. {}", + clusterState.version(), details), reason); committed = false; committedOrFailedLatch.countDown(); return true; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java similarity index 95% rename from core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java rename to core/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java index dbfaed572b1..9ff3215cd64 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastHostsProvider.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.discovery.zen.ping.unicast; +package org.elasticsearch.discovery.zen; import org.elasticsearch.cluster.node.DiscoveryNode; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java similarity index 92% rename from core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java rename to core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 637730c75fd..f6870cc05b6 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -17,7 +17,27 @@ * under the License. */ -package org.elasticsearch.discovery.zen.ping.unicast; +package org.elasticsearch.discovery.zen; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -30,8 +50,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; @@ -44,9 +63,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.discovery.zen.ping.PingContextProvider; -import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -59,37 +75,13 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadFactory; 
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-
 import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
-import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPingResponse;
+import static org.elasticsearch.discovery.zen.ZenPing.PingResponse.readPingResponse;

-/**
- *
- */
-public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing {
+public class UnicastZenPing extends AbstractComponent implements ZenPing {

     public static final String ACTION_NAME = "internal:discovery/zen/unicast";

     public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING =
@@ -102,11 +94,9 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing
     public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
     public static final int LIMIT_LOCAL_PORTS_COUNT = 5;

-    private final ThreadPool threadPool;
     private final TransportService transportService;
     private final ClusterName clusterName;

-    private final ElectMasterService electMasterService;

     private final int concurrentConnects;
@@ -127,26 +117,19 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing
     // a list of temporal responses a node will return for a request (holds requests from other configuredTargetNodes)
     private final Queue<PingResponse> temporalResponses = ConcurrentCollections.newQueue();

-    private final CopyOnWriteArrayList<UnicastHostsProvider> hostsProviders = new CopyOnWriteArrayList<>();
+    private final UnicastHostsProvider hostsProvider;

     private final ExecutorService unicastConnectExecutor;

     private volatile boolean closed = false;

-    @Inject
     public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
-                          ElectMasterService electMasterService, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
+                          UnicastHostsProvider unicastHostsProvider) {
         super(settings);
         this.threadPool = threadPool;
         this.transportService = transportService;
         this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
-        this.electMasterService = electMasterService;
-
-        if (unicastHostsProviders != null) {
-            for (UnicastHostsProvider unicastHostsProvider : unicastHostsProviders) {
-                addHostsProvider(unicastHostsProvider);
-            }
-        }
+        this.hostsProvider = unicastHostsProvider;

         this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
         List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);
@@ -203,30 +186,14 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing
     }

     @Override
-    protected void doStart() {
-    }
-
-    @Override
-    protected void doStop() {
-    }
-
-    @Override
-    protected void doClose() {
+    public void close() throws IOException {
         ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
-        try {
-            IOUtils.close(receivedResponses.values());
-        } catch (IOException e) {
-            throw new ElasticsearchException("Error wile closing send ping handlers", e);
-        }
+        IOUtils.close(receivedResponses.values());
         closed = true;
     }

-    public void addHostsProvider(UnicastHostsProvider provider) {
-        hostsProviders.add(provider);
-    }
-
     @Override
-    public void setPingContextProvider(PingContextProvider contextProvider) {
+    public void start(PingContextProvider contextProvider)
{ this.contextProvider = contextProvider; } @@ -353,10 +320,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin nodesToPingSet.add(temporalResponse.node()); } } - - for (UnicastHostsProvider provider : hostsProviders) { - nodesToPingSet.addAll(provider.buildDynamicNodes()); - } + nodesToPingSet.addAll(hostsProvider.buildDynamicNodes()); // add all possible master nodes that were active in the last known cluster configuration for (ObjectCursor masterNode : discoNodes.getMasterNodes().values()) { @@ -364,7 +328,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin } // sort the nodes by likelihood of being an active master - List sortedNodesToPing = electMasterService.sortByMasterLikelihood(nodesToPingSet); + List sortedNodesToPing = ElectMasterService.sortByMasterLikelihood(nodesToPingSet); // new add the unicast targets first List nodesToPing = CollectionUtils.arrayAsArrayList(configuredTargetNodes); @@ -521,9 +485,6 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin } private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) { - if (!lifecycle.started()) { - throw new IllegalStateException("received ping request while not started"); - } temporalResponses.add(request.pingResponse); threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() { @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 833349e9d9a..8f02b037c20 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -22,6 +22,7 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -54,14 +55,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.discovery.zen.fd.MasterFaultDetection; -import org.elasticsearch.discovery.zen.fd.NodesFaultDetection; -import org.elasticsearch.discovery.zen.membership.MembershipAction; -import org.elasticsearch.discovery.zen.ping.PingContextProvider; -import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; -import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -75,6 +68,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -113,7 +107,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private AllocationService 
allocationService; private final ClusterName clusterName; private final DiscoverySettings discoverySettings; - private final ZenPingService pingService; + private final ZenPing zenPing; private final MasterFaultDetection masterFD; private final NodesFaultDetection nodesFD; private final PublishClusterStateAction publishClusterState; @@ -144,19 +138,16 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private volatile NodeJoinController nodeJoinController; private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; - @Inject - public ZenDiscovery(Settings settings, ThreadPool threadPool, - TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings, - ZenPingService pingService, ElectMasterService electMasterService) { + public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPing zenPing) { super(settings); this.clusterService = clusterService; this.clusterName = clusterService.getClusterName(); this.transportService = transportService; this.discoverySettings = new DiscoverySettings(settings, clusterSettings); - this.pingService = pingService; - this.electMaster = electMasterService; + this.zenPing = zenPing; + this.electMaster = new ElectMasterService(settings); this.pingTimeout = PING_TIMEOUT_SETTING.get(settings); - this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); this.joinRetryAttempts = JOIN_RETRY_ATTEMPTS_SETTING.get(settings); this.joinRetryDelay = JOIN_RETRY_DELAY_SETTING.get(settings); @@ -179,7 +170,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); - this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, clusterService.getClusterName()); this.nodesFD.addListener(new NodeFaultDetectionListener()); @@ -191,9 +181,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover new NewPendingClusterStateListener(), discoverySettings, clusterService.getClusterName()); - this.pingService.setPingContextProvider(this); this.membership = new MembershipAction(settings, transportService, this, new MembershipListener()); - this.joinThreadControl = new JoinThreadControl(threadPool); transportService.registerRequestHandler( @@ -209,7 +197,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover protected void doStart() { nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); - pingService.start(); + zenPing.start(this); this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::rejoin, logger); } @@ -241,7 +229,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override protected void doStop() { joinThreadControl.stop(); - pingService.stop(); masterFD.stop("zen disco stop"); nodesFD.stop(); DiscoveryNodes nodes = nodes(); @@ -272,10 +259,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } @Override - protected void doClose() { - masterFD.close(); - nodesFD.close(); - pingService.close(); + protected void doClose() throws IOException { + IOUtils.close(masterFD, 
nodesFD, zenPing); } @Override @@ -288,7 +273,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return clusterName.value() + "/" + clusterService.localNode().getId(); } - /** start of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ + /** start of {@link PingContextProvider } implementation */ @Override public DiscoveryNodes nodes() { return clusterService.state().nodes(); @@ -299,7 +284,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return clusterService.state(); } - /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ + /** end of {@link PingContextProvider } implementation */ @Override @@ -330,6 +315,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // update the set of nodes to ping after the new cluster state has been published nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + + // clean the pending cluster queue - we are currently master, so any pending cluster state should be failed + // note that we also clean the queue on master failure (see handleMasterGone) but a delayed cluster state publish + // from a stale master can still make it in the queue during the election (but not be committed) + publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("elected as master")); } /** @@ -370,6 +360,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return publishClusterState.pendingStatesQueue().pendingClusterStates(); } + PendingClusterStatesQueue pendingClusterStatesQueue() { + return publishClusterState.pendingStatesQueue(); + } + /** * the main function of a join thread. This function is guaranteed to join the cluster * or spawn a new join thread upon failure to do so. 
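
The pingAndWait helper introduced just below in ZenDiscovery replaces the deleted ZenPingService.pingAndWait: it bridges the asynchronous ZenPing.ping callback to a blocking call with a single-use CountDownLatch, counting down on the failure path as well so the caller can never hang. A minimal standalone sketch of the same pattern (illustrative names, not the actual Elasticsearch types):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;

final class PingCollector {
    // Bridges an async, callback-based ping to a blocking call. The latch is
    // released exactly once: either by the callback or by the catch block, so
    // a failed dispatch cannot leave the caller waiting forever.
    static List<String> pingAndWait(Consumer<Consumer<List<String>>> asyncPing) {
        final List<String> responses = Collections.synchronizedList(new ArrayList<>());
        final CountDownLatch latch = new CountDownLatch(1);
        try {
            asyncPing.accept(pings -> {
                responses.addAll(pings);
                latch.countDown(); // success path releases the waiter
            });
        } catch (Exception ex) {
            latch.countDown(); // failure path must also release the waiter
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status, return what we have
        }
        return responses;
    }
}
```
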
@@ -691,15 +685,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return currentState; } - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(currentState.nodes()) - // make sure the old master node, which has failed, is not part of the nodes we publish - .remove(masterNode) - .masterNodeId(null).build(); - // flush any pending cluster states from old master, so it will not be set as master again publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason)); - return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")"); + return rejoin(currentState, "master left (reason = " + reason + ")"); } @Override @@ -875,7 +864,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private DiscoveryNode findMaster() { logger.trace("starting to ping"); - List fullPingResponses = pingService.pingAndWait(pingTimeout).toList(); + List fullPingResponses = pingAndWait(pingTimeout).toList(); if (fullPingResponses == null) { logger.trace("No full ping responses"); return null; @@ -1017,6 +1006,28 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } } + private ZenPing.PingCollection pingAndWait(TimeValue timeout) { + final ZenPing.PingCollection response = new ZenPing.PingCollection(); + final CountDownLatch latch = new CountDownLatch(1); + try { + zenPing.ping(pings -> { + response.addPings(pings); + latch.countDown(); + }, timeout); + } catch (Exception ex) { + logger.warn("Ping execution failed", ex); + latch.countDown(); + } + + try { + latch.await(); + return response; + } catch (InterruptedException e) { + logger.trace("pingAndWait interrupted"); + return response; + } + } + private class NewPendingClusterStateListener implements PublishClusterStateAction.NewPendingClusterStateListener { @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java similarity index 94% rename from core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java rename to core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java index b4bb61ad461..cb2c8cb5019 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java @@ -17,18 +17,9 @@ * under the License. 
 */
-package org.elasticsearch.discovery.zen.ping;
-
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.component.LifecycleComponent;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.discovery.zen.ElectMasterService;
+package org.elasticsearch.discovery.zen;

+import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -37,11 +28,19 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;

+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.unit.TimeValue;
+
 import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;

-public interface ZenPing extends LifecycleComponent {
+public interface ZenPing extends Closeable {

-    void setPingContextProvider(PingContextProvider contextProvider);
+    void start(PingContextProvider contextProvider);

     void ping(PingListener listener, TimeValue timeout);

@@ -159,8 +158,8 @@ public interface ZenPing extends LifecycleComponent {

         @Override
         public String toString() {
-            return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], cluster_state_version [" + clusterStateVersion
-                + "], cluster_name[" + clusterName.value() + "]}";
+            return "ping_response{node [" + node + "], id[" + id + "], master [" + master + "], " +
+                "cluster_state_version [" + clusterStateVersion + "], cluster_name[" + clusterName.value() + "]}";
         }
     }

diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
deleted file mode 100644
index 3a2ddc10cfb..00000000000
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.discovery.zen.ping; - -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; - -public class ZenPingService extends AbstractLifecycleComponent { - - private List zenPings = Collections.emptyList(); - - @Inject - public ZenPingService(Settings settings, Set zenPings) { - super(settings); - this.zenPings = Collections.unmodifiableList(new ArrayList<>(zenPings)); - } - - public List zenPings() { - return this.zenPings; - } - - public void setPingContextProvider(PingContextProvider contextProvider) { - if (lifecycle.started()) { - throw new IllegalStateException("Can't set nodes provider when started"); - } - for (ZenPing zenPing : zenPings) { - zenPing.setPingContextProvider(contextProvider); - } - } - - @Override - protected void doStart() { - for (ZenPing zenPing : zenPings) { - zenPing.start(); - } - } - - @Override - protected void doStop() { - for (ZenPing zenPing : zenPings) { - zenPing.stop(); - } - } - - @Override - protected void doClose() { - for (ZenPing zenPing : zenPings) { - zenPing.close(); - } - } - - public ZenPing.PingCollection pingAndWait(TimeValue timeout) { - final ZenPing.PingCollection response = new ZenPing.PingCollection(); - final CountDownLatch latch = new CountDownLatch(zenPings.size()); - for (ZenPing zenPing : zenPings) { - final AtomicBoolean counted = new AtomicBoolean(); - try { - zenPing.ping(pings -> { - response.addPings(pings); - if (counted.compareAndSet(false, true)) { - latch.countDown(); - } - }, timeout); - } catch (Exception ex) { - logger.warn("Ping execution failed", ex); - if (counted.compareAndSet(false, true)) { - latch.countDown(); - } - } - } - try { - latch.await(); - return response; - } catch (InterruptedException e) { - logger.trace("pingAndWait interrupted"); - return response; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index f3e1f2fb24d..401f3f12f4b 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -1002,11 +1002,16 @@ public final class NodeEnvironment implements Closeable { private static void tryWriteTempFile(Path path) throws IOException { if (Files.exists(path)) { Path resolve = path.resolve(".es_temp_file"); + boolean tempFileCreated = false; try { Files.createFile(resolve); - Files.deleteIfExists(resolve); + tempFileCreated = true; } catch (IOException ex) { throw new IOException("failed to write in data directory [" + path + "] write permission is required", ex); + } finally { + if (tempFileCreated) { + Files.deleteIfExists(resolve); + } } } } diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index 42c40034b10..37277586bf7 100644 --- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -124,7 +124,7 @@ public abstract class AsyncShardFetch implements Rel } // if we are still fetching, return null to indicate it - if (hasAnyNodeFetching(cache) == 
true) { + if (hasAnyNodeFetching(cache)) { return new FetchResult<>(shardId, null, emptySet(), emptySet()); } else { // nothing to fetch, yay, build the return value @@ -137,7 +137,7 @@ public abstract class AsyncShardFetch implements Rel DiscoveryNode node = nodes.get(nodeId); if (node != null) { - if (nodeEntry.isFailed() == true) { + if (nodeEntry.isFailed()) { // if its failed, remove it from the list of nodes, so if this run doesn't work // we try again next round to fetch it again it.remove(); @@ -361,7 +361,7 @@ public abstract class AsyncShardFetch implements Rel } void doneFetching(T value) { - assert fetching == true : "setting value but not in fetching mode"; + assert fetching : "setting value but not in fetching mode"; assert failure == null : "setting value when failure already set"; this.valueSet = true; this.value = value; @@ -369,7 +369,7 @@ public abstract class AsyncShardFetch implements Rel } void doneFetching(Throwable failure) { - assert fetching == true : "setting value but not in fetching mode"; + assert fetching : "setting value but not in fetching mode"; assert valueSet == false : "setting failure when already set value"; assert failure != null : "setting failure can't be null"; this.failure = failure; @@ -377,7 +377,7 @@ public abstract class AsyncShardFetch implements Rel } void restartFetching() { - assert fetching == true : "restarting fetching, but not in fetching mode"; + assert fetching : "restarting fetching, but not in fetching mode"; assert valueSet == false : "value can't be set when restarting fetching"; assert failure == null : "failure can't be set when restarting fetching"; this.fetching = false; @@ -388,7 +388,7 @@ public abstract class AsyncShardFetch implements Rel } boolean hasData() { - return valueSet == true || failure != null; + return valueSet || failure != null; } Throwable getFailure() { @@ -399,7 +399,7 @@ public abstract class AsyncShardFetch implements Rel @Nullable T getValue() { assert failure == null : "trying to fetch value, but its marked as failed, check isFailed"; - assert valueSet == true : "value is not set, hasn't been fetched yet"; + assert valueSet : "value is not set, hasn't been fetched yet"; return value; } } diff --git a/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index 3874d54f457..461fcf1e2d2 100644 --- a/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -53,21 +53,21 @@ public abstract class BaseGatewayShardAllocator extends AbstractComponent { final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { final ShardRouting shard = unassignedIterator.next(); - final UnassignedShardDecision unassignedShardDecision = makeAllocationDecision(shard, 
allocation, logger); + final ShardAllocationDecision shardAllocationDecision = makeAllocationDecision(shard, allocation, logger); - if (unassignedShardDecision.isDecisionTaken() == false) { + if (shardAllocationDecision.isDecisionTaken() == false) { // no decision was taken by this allocator continue; } - if (unassignedShardDecision.getFinalDecisionSafe().type() == Decision.Type.YES) { - unassignedIterator.initialize(unassignedShardDecision.getAssignedNodeId(), - unassignedShardDecision.getAllocationId(), + if (shardAllocationDecision.getFinalDecisionSafe() == Decision.Type.YES) { + unassignedIterator.initialize(shardAllocationDecision.getAssignedNodeId(), + shardAllocationDecision.getAllocationId(), shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE : allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); } else { - unassignedIterator.removeAndIgnore(unassignedShardDecision.getAllocationStatus(), allocation.changes()); + unassignedIterator.removeAndIgnore(shardAllocationDecision.getAllocationStatus(), allocation.changes()); } } } @@ -80,9 +80,9 @@ public abstract class BaseGatewayShardAllocator extends AbstractComponent { * @param unassignedShard the unassigned shard to allocate * @param allocation the current routing state * @param logger the logger - * @return an {@link UnassignedShardDecision} with the final decision of whether to allocate and details of the decision + * @return an {@link ShardAllocationDecision} with the final decision of whether to allocate and details of the decision */ - public abstract UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard, + public abstract ShardAllocationDecision makeAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation, Logger logger); } diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 370778898fc..0c829e88182 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -153,7 +153,7 @@ public class DanglingIndicesState extends AbstractComponent { * for allocation. 
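
Worth highlighting from the NodeEnvironment.tryWriteTempFile hunk above: the probe file is now deleted in a finally block, and only if it was actually created, so a failure to delete is no longer misreported as a missing write permission. A minimal sketch of the same pattern (the path and message are illustrative, not the exact Elasticsearch code):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

final class WriteProbe {
    // Probes a directory for write permission by creating a marker file. The
    // marker is removed in finally, but only when it was actually created, so
    // only a failed create maps to the "write permission" error message.
    static void assertWritable(Path dir) throws IOException {
        Path probe = dir.resolve(".probe_temp_file"); // hypothetical file name
        boolean created = false;
        try {
            Files.createFile(probe);
            created = true;
        } catch (IOException ex) {
            throw new IOException("failed to write in data directory [" + dir + "]; write permission is required", ex);
        } finally {
            if (created) {
                Files.deleteIfExists(probe); // a cleanup failure propagates unmasked
            }
        }
    }
}
```
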
*/ private void allocateDanglingIndices() { - if (danglingIndices.isEmpty() == true) { + if (danglingIndices.isEmpty()) { return; } try { diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java index 3030632a769..3a6bfa7aec1 100644 --- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -34,10 +34,10 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.Index; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.indices.IndicesService; import java.util.Arrays; +import java.util.Map; import java.util.function.Supplier; public class Gateway extends AbstractComponent implements ClusterStateListener { @@ -50,13 +50,11 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { private final Supplier minimumMasterNodesProvider; private final IndicesService indicesService; - private final NodeServicesProvider nodeServicesProvider; public Gateway(Settings settings, ClusterService clusterService, GatewayMetaState metaState, TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery, - NodeServicesProvider nodeServicesProvider, IndicesService indicesService) { + IndicesService indicesService) { super(settings); - this.nodeServicesProvider = nodeServicesProvider; this.indicesService = indicesService; this.clusterService = clusterService; this.metaState = metaState; @@ -133,7 +131,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { try { if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) { // verify that we can actually create this index - if not we recover it as closed with lots of warn logs - indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData, electedIndexMetaData); + indicesService.verifyIndexMetadata(electedIndexMetaData, electedIndexMetaData); } } catch (Exception e) { final Index electedIndex = electedIndexMetaData.getIndex(); @@ -148,13 +146,35 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { } } final ClusterSettings clusterSettings = clusterService.getClusterSettings(); - metaDataBuilder.persistentSettings(clusterSettings.archiveUnknownOrBrokenSettings(metaDataBuilder.persistentSettings())); - metaDataBuilder.transientSettings(clusterSettings.archiveUnknownOrBrokenSettings(metaDataBuilder.transientSettings())); + metaDataBuilder.persistentSettings( + clusterSettings.archiveUnknownOrInvalidSettings( + metaDataBuilder.persistentSettings(), + e -> logUnknownSetting("persistent", e), + (e, ex) -> logInvalidSetting("persistent", e, ex))); + metaDataBuilder.transientSettings( + clusterSettings.archiveUnknownOrInvalidSettings( + metaDataBuilder.transientSettings(), + e -> logUnknownSetting("transient", e), + (e, ex) -> logInvalidSetting("transient", e, ex))); ClusterState.Builder builder = ClusterState.builder(clusterService.getClusterName()); builder.metaData(metaDataBuilder); listener.onSuccess(builder.build()); } + private void logUnknownSetting(String settingType, Map.Entry e) { + logger.warn("ignoring unknown {} setting: [{}] with value [{}]; archiving", settingType, e.getKey(), e.getValue()); + } + + private void logInvalidSetting(String settingType, Map.Entry e, IllegalArgumentException ex) { + logger.warn( + 
(org.apache.logging.log4j.util.Supplier<?>)
+                () -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving",
+                    settingType,
+                    e.getKey(),
+                    e.getValue()),
+            ex);
+    }
+
     @Override
     public void clusterChanged(final ClusterChangedEvent event) {
         // order is important, first metaState, and then shardsState
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
index 450255575d9..65a2876b3aa 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
@@ -42,9 +42,6 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;

-/**
- *
- */
 public class GatewayAllocator extends AbstractComponent {

     private RoutingService routingService;
@@ -88,7 +85,7 @@ public class GatewayAllocator extends AbstractComponent {
             boolean cleanCache = false;
             DiscoveryNode localNode = event.state().nodes().getLocalNode();
             if (localNode != null) {
-                if (localNode.isMasterNode() == true && event.localNodeMaster() == false) {
+                if (localNode.isMasterNode() && event.localNodeMaster() == false) {
                     cleanCache = true;
                 }
             } else {
@@ -177,7 +174,7 @@ public class GatewayAllocator extends AbstractComponent {
             AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
-            if (shardState.hasData() == true) {
+            if (shardState.hasData()) {
                 shardState.processAllocation(allocation);
             }
             return shardState;
@@ -202,7 +199,7 @@ public class GatewayAllocator extends AbstractComponent {
             }
             AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId()));
-            if (shardStores.hasData() == true) {
+            if (shardStores.hasData()) {
                 shardStores.processAllocation(allocation);
             }
             return shardStores;
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayException.java b/core/src/main/java/org/elasticsearch/gateway/GatewayException.java
index 619d3d5f41e..32050f1c10e 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayException.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayException.java
@@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput;

 import java.io.IOException;

-/**
- *
- */
 public class GatewayException extends ElasticsearchException {

     public GatewayException(String msg) {
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
index a05e85299a8..b609d0bacae 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
@@ -192,7 +192,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateListener
         Set<Index> relevantIndices;
         if (isDataOnlyNode(state)) {
             relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
-        } else if (state.nodes().getLocalNode().isMasterNode() == true) {
+        } else if (state.nodes().getLocalNode().isMasterNode()) {
             relevantIndices = getRelevantIndicesForMasterEligibleNode(state);
         } else {
             relevantIndices = Collections.emptySet();
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java b/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java
index 8169062a70b..b78310171e4 100644
---
a/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayModule.java @@ -22,9 +22,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.settings.Settings; -/** - * - */ public class GatewayModule extends AbstractModule { diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index 856574748db..2e351b2e6bf 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -20,6 +20,7 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -43,16 +44,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.atomic.AtomicBoolean; -/** - * - */ public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener { public static final Setting EXPECTED_NODES_SETTING = @@ -98,10 +95,10 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste public GatewayService(Settings settings, AllocationService allocationService, ClusterService clusterService, ThreadPool threadPool, GatewayMetaState metaState, TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery, - NodeServicesProvider nodeServicesProvider, IndicesService indicesService) { + IndicesService indicesService) { super(settings); this.gateway = new Gateway(settings, clusterService, metaState, listGatewayMetaState, discovery, - nodeServicesProvider, indicesService); + indicesService); this.allocationService = allocationService; this.clusterService = clusterService; this.threadPool = threadPool; diff --git a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 24562b52163..dcaccb88269 100644 --- a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -50,8 +50,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; -/** - */ public class LocalAllocateDangledIndices extends AbstractComponent { public static final String ACTION_NAME = "internal:gateway/local/allocate_dangled"; diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 25ae3b7cce9..7d8e8327d39 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -32,7 +32,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import 
org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; import org.elasticsearch.common.settings.Setting; @@ -110,20 +110,20 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { } @Override - public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard, + public ShardAllocationDecision makeAllocationDecision(final ShardRouting unassignedShard, final RoutingAllocation allocation, final Logger logger) { if (isResponsibleFor(unassignedShard) == false) { // this allocator is not responsible for allocating this shard - return UnassignedShardDecision.DECISION_NOT_TAKEN; + return ShardAllocationDecision.DECISION_NOT_TAKEN; } final boolean explain = allocation.debugDecision(); final FetchResult shardState = fetchData(unassignedShard, allocation); if (shardState.hasData() == false) { allocation.setHasPendingAsyncFetch(); - return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA, - "still fetching shard state from the nodes in the cluster"); + return ShardAllocationDecision.no(AllocationStatus.FETCHING_SHARD_DATA, + explain ? "still fetching shard state from the nodes in the cluster" : null); } // don't create a new IndexSetting object for every shard as this could cause a lot of garbage @@ -167,19 +167,19 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { // let BalancedShardsAllocator take care of allocating this shard logger.debug("[{}][{}]: missing local data, will restore from [{}]", unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource()); - return UnassignedShardDecision.DECISION_NOT_TAKEN; + return ShardAllocationDecision.DECISION_NOT_TAKEN; } else if (recoverOnAnyNode) { // let BalancedShardsAllocator take care of allocating this shard logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id()); - return UnassignedShardDecision.DECISION_NOT_TAKEN; + return ShardAllocationDecision.DECISION_NOT_TAKEN; } else { // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary. // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for // this shard will be picked up when the node joins and we do another allocation reroute logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound); - return UnassignedShardDecision.noDecision(AllocationStatus.NO_VALID_SHARD_COPY, - "shard was previously allocated, but no valid shard copy could be found amongst the current nodes in the cluster"); + return ShardAllocationDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, + explain ? 
"shard was previously allocated, but no valid shard copy could be found amongst the nodes in the cluster" : null); } } @@ -191,10 +191,11 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode()); final String nodeId = decidedNode.nodeShardState.getNode().getId(); - return UnassignedShardDecision.yesDecision( + return ShardAllocationDecision.yes(nodeId, "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]", - nodeId, decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain)); - } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) { + decidedNode.nodeShardState.allocationId(), + buildNodeDecisions(nodesToAllocate, explain)); + } else if (nodesToAllocate.throttleNodeShards.isEmpty() && !nodesToAllocate.noNodeShards.isEmpty()) { // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard // can be force-allocated to one of the nodes. final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate( @@ -206,22 +207,21 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation", unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode()); final String nodeId = nodeShardState.getNode().getId(); - return UnassignedShardDecision.yesDecision( + return ShardAllocationDecision.yes(nodeId, "allocating the primary shard to node [" + nodeId+ "], which has a complete copy of the shard data", - nodeId, nodeShardState.allocationId(), buildNodeDecisions(nodesToForceAllocate, explain)); } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) { logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation", unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards); - return UnassignedShardDecision.throttleDecision( - "allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries", + return ShardAllocationDecision.throttle( + explain ? "allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries" : null, buildNodeDecisions(nodesToForceAllocate, explain)); } else { logger.debug("[{}][{}]: forced primary allocation denied [{}]", unassignedShard.index(), unassignedShard.id(), unassignedShard); - return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, - "all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted", + return ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO, + explain ? 
"all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted" : null, buildNodeDecisions(nodesToForceAllocate, explain)); } } else { @@ -229,8 +229,8 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { // taking place on the node currently, ignore it for now logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards); - return UnassignedShardDecision.throttleDecision( - "allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries", + return ShardAllocationDecision.throttle( + explain ? "allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries" : null, buildNodeDecisions(nodesToAllocate, explain)); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 8f90e072ed2..4c73ae067b6 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -32,7 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; @@ -40,6 +40,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; +import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData; import java.util.ArrayList; import java.util.HashMap; @@ -47,8 +48,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -/** - */ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { public ReplicaShardAllocator(Settings settings) { @@ -66,7 +65,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { List shardCancellationActions = new ArrayList<>(); for (RoutingNode routingNode : routingNodes) { for (ShardRouting shard : routingNode) { - if (shard.primary() == true) { + if (shard.primary()) { continue; } if (shard.initializing() == false) { @@ -81,7 +80,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { continue; } - AsyncShardFetch.FetchResult shardStores = fetchData(shard, allocation); + AsyncShardFetch.FetchResult shardStores = fetchData(shard, allocation); if (shardStores.hasData() == false) { logger.trace("{}: fetching new stores for initializing shard", shard); continue; // still fetching @@ -110,7 +109,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { } if (currentNode.equals(nodeWithHighestMatch) == false && Objects.equals(currentSyncId, primaryStore.syncId()) == false - && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) { + && 
matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch)) { // we found a better match that has a full sync id match, the existing allocation is not fully synced // so we found a better one, cancel this one logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]", @@ -140,12 +139,12 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { } @Override - public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard, + public ShardAllocationDecision makeAllocationDecision(final ShardRouting unassignedShard, final RoutingAllocation allocation, final Logger logger) { if (isResponsibleFor(unassignedShard) == false) { // this allocator is not responsible for deciding on this shard - return UnassignedShardDecision.DECISION_NOT_TAKEN; + return ShardAllocationDecision.DECISION_NOT_TAKEN; } final RoutingNodes routingNodes = allocation.routingNodes(); @@ -154,17 +153,17 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { Tuple> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain); if (allocateDecision.v1().type() != Decision.Type.YES) { logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard); - return UnassignedShardDecision.noDecision(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1()), - "all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard", + return ShardAllocationDecision.no(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1().type()), + explain ? "all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard" : null, allocateDecision.v2()); } - AsyncShardFetch.FetchResult shardStores = fetchData(unassignedShard, allocation); + AsyncShardFetch.FetchResult shardStores = fetchData(unassignedShard, allocation); if (shardStores.hasData() == false) { logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard); allocation.setHasPendingAsyncFetch(); - return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA, - "still fetching shard state from the nodes in the cluster"); + return ShardAllocationDecision.no(AllocationStatus.FETCHING_SHARD_DATA, + explain ? 
"still fetching shard state from the nodes in the cluster" : null); } ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId()); @@ -176,7 +175,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { // will try and recover from // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard); - return UnassignedShardDecision.DECISION_NOT_TAKEN; + return ShardAllocationDecision.DECISION_NOT_TAKEN; } MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain); @@ -190,27 +189,28 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node()); // we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now - return UnassignedShardDecision.throttleDecision( - "returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one " + - "of those copies", matchingNodes.nodeDecisions); + return ShardAllocationDecision.throttle( + explain ? "returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one of those copies" : null, + matchingNodes.nodeDecisions); } else { logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node()); // we found a match - return UnassignedShardDecision.yesDecision( + return ShardAllocationDecision.yes(nodeWithHighestMatch.nodeId(), "allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store", - nodeWithHighestMatch.nodeId(), null, matchingNodes.nodeDecisions); + null, + matchingNodes.nodeDecisions); } } else if (matchingNodes.hasAnyData() == false && unassignedShard.unassignedInfo().isDelayed()) { // if we didn't manage to find *any* data (regardless of matching sizes), and the replica is // unassigned due to a node leaving, so we delay allocation of this replica to see if the // node with the shard copy will rejoin so we can re-use the copy it has logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard); - return UnassignedShardDecision.noDecision(AllocationStatus.DELAYED_ALLOCATION, - "not allocating this shard, no nodes contain data for the replica and allocation is delayed"); + return ShardAllocationDecision.no(AllocationStatus.DELAYED_ALLOCATION, + explain ? "not allocating this shard, no nodes contain data for the replica and allocation is delayed" : null); } - return UnassignedShardDecision.DECISION_NOT_TAKEN; + return ShardAllocationDecision.DECISION_NOT_TAKEN; } /** @@ -250,13 +250,13 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { /** * Finds the store for the assigned shard in the fetched data, returns null if none is found. 
*/ - private TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation, AsyncShardFetch.FetchResult data) { + private TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation, AsyncShardFetch.FetchResult data) { assert shard.currentNodeId() != null; DiscoveryNode primaryNode = allocation.nodes().get(shard.currentNodeId()); if (primaryNode == null) { return null; } - TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData primaryNodeFilesStore = data.getData().get(primaryNode); + NodeStoreFilesMetaData primaryNodeFilesStore = data.getData().get(primaryNode); if (primaryNodeFilesStore == null) { return null; } @@ -265,11 +265,11 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation, TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore, - AsyncShardFetch.FetchResult data, + AsyncShardFetch.FetchResult data, boolean explain) { ObjectLongMap nodesToSize = new ObjectLongHashMap<>(); Map nodeDecisions = new HashMap<>(); - for (Map.Entry nodeStoreEntry : data.getData().entrySet()) { + for (Map.Entry nodeStoreEntry : data.getData().entrySet()) { DiscoveryNode discoNode = nodeStoreEntry.getKey(); TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData(); // we don't have any files at all, it is an empty index @@ -317,7 +317,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { return new MatchingNodes(nodesToSize, explain ? nodeDecisions : null); } - protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); + protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); static class MatchingNodes { private final ObjectLongMap nodesToSize; diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 24886dc72d3..3e6769200e9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -45,9 +45,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.List; -/** - * - */ public class TransportNodesListGatewayMetaState extends TransportNodesAction globalCheckpointSyncer, - IndicesFieldDataCache indicesFieldDataCache) throws IOException { + public IndexService newIndexService( + NodeEnvironment environment, + IndexService.ShardStoreDeleter shardStoreDeleter, + CircuitBreakerService circuitBreakerService, + BigArrays bigArrays, + ThreadPool threadPool, + ScriptService scriptService, + IndicesQueriesRegistry indicesQueriesRegistry, + ClusterService clusterService, + Client client, + IndicesQueryCache indicesQueryCache, + MapperRegistry mapperRegistry, + Consumer globalCheckpointSyncer, + IndicesFieldDataCache indicesFieldDataCache) + throws IOException { final IndexEventListener eventListener = freeze(); IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? 
(shard) -> null : indexSearcherWrapper.get(); @@ -347,8 +365,9 @@ public final class IndexModule { queryCache = new DisabledQueryCache(indexSettings); } return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, - analysisRegistry, engineFactory.get(), servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, - mapperRegistry, indicesFieldDataCache, globalCheckpointSyncer, searchOperationListeners, indexOperationListeners); + analysisRegistry, engineFactory.get(), circuitBreakerService, bigArrays, threadPool, scriptService, indicesQueriesRegistry, + clusterService, client, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, + globalCheckpointSyncer, searchOperationListeners, indexOperationListeners); } /** diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index d11a2d6922d..90b400d70d9 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -22,23 +22,18 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -53,11 +48,11 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShadowIndexShard; @@ -68,11 +63,12 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.indices.AliasFilterParsingException; -import 
org.elasticsearch.indices.InvalidAliasNameException; +import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; @@ -84,12 +80,12 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import java.util.function.LongSupplier; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -103,7 +99,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final BitsetFilterCache bitsetFilterCache; private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; - private final NodeServicesProvider nodeServicesProvider; private final IndexStore indexStore; private final IndexSearcherWrapper searcherWrapper; private final IndexCache indexCache; @@ -123,13 +118,23 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ThreadPool threadPool; private final BigArrays bigArrays; private final AsyncGlobalCheckpointTask globalCheckpointTask; + private final ScriptService scriptService; + private final IndicesQueriesRegistry queryRegistry; + private final ClusterService clusterService; + private final Client client; public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, SimilarityService similarityService, ShardStoreDeleter shardStoreDeleter, AnalysisRegistry registry, @Nullable EngineFactory engineFactory, - NodeServicesProvider nodeServicesProvider, + CircuitBreakerService circuitBreakerService, + BigArrays bigArrays, + ThreadPool threadPool, + ScriptService scriptService, + IndicesQueriesRegistry queryRegistry, + ClusterService clusterService, + Client client, QueryCache queryCache, IndexStore indexStore, IndexEventListener eventListener, @@ -145,15 +150,20 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust this.indexAnalyzers = registry.build(indexSettings); this.similarityService = similarityService; this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, - IndexService.this::newQueryShardContext); - this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, - nodeServicesProvider.getCircuitBreakerService(), mapperService); + // we parse all percolator queries as they would be parsed on shard 0 + () -> newQueryShardContext(0, null, () -> { + throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp"); + })); + this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); this.shardStoreDeleter = shardStoreDeleter; - this.bigArrays = nodeServicesProvider.getBigArrays(); - this.threadPool = nodeServicesProvider.getThreadPool(); + this.bigArrays = bigArrays; + this.threadPool = threadPool; + this.scriptService = scriptService; + this.queryRegistry = queryRegistry; + this.clusterService =
clusterService; + this.client = client; this.eventListener = eventListener; this.nodeEnv = nodeEnv; - this.nodeServicesProvider = nodeServicesProvider; this.indexStore = indexStore; indexFieldData.setListener(new FieldDataCacheListener(this)); this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); @@ -412,7 +422,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } finally { try { - store.close(); + if (store != null) { + store.close(); + } else { + logger.trace("[{}] store not initialized prior to closing shard, nothing to close", shardId); + } } catch (Exception e) { logger.warn( (Supplier<?>) () -> new ParameterizedMessage( @@ -442,10 +456,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } - public NodeServicesProvider getIndexServices() { - return nodeServicesProvider; - } - @Override public IndexSettings getIndexSettings() { return indexSettings; @@ -453,34 +463,41 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust /** * Creates a new QueryShardContext. The context has no types set yet; if types are required, set them via - * {@link QueryShardContext#setTypes(String...)} + * {@link QueryShardContext#setTypes(String...)}. + * + * Passing a {@code null} {@link IndexReader} will return a valid context; however, it won't be able to make + * {@link IndexReader}-specific optimizations, such as rewriting queries containing ranges. */ - public QueryShardContext newQueryShardContext(IndexReader indexReader) { + public QueryShardContext newQueryShardContext(int shardId, IndexReader indexReader, LongSupplier nowInMillis) { return new QueryShardContext( - indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), - similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(), - nodeServicesProvider.getClient(), indexReader, - nodeServicesProvider.getClusterService().state() - ); + shardId, indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), + similarityService(), scriptService, queryRegistry, + client, indexReader, + clusterService.state(), + nowInMillis); } /** - * Creates a new QueryShardContext. The context has no types set yet; if types are required, set them via - * {@link QueryShardContext#setTypes(String...)}. This context may be used for query parsing but cannot be - * used for rewriting since it does not know about the current {@link IndexReader}. + * The {@link ThreadPool} to use for this index. */ - public QueryShardContext newQueryShardContext() { - return newQueryShardContext(null); - } - public ThreadPool getThreadPool() { return threadPool; } + /** + * The {@link BigArrays} to use for this index. + */ public BigArrays getBigArrays() { return bigArrays; } + /** + * The {@link ScriptService} to use for this index. + */ + public ScriptService getScriptService() { + return scriptService; + } + List<IndexingOperationListener> getIndexOperationListeners() { // pkg private for testing return indexingOperationListeners; } @@ -579,64 +596,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } - /** - * Returns the filter associated with listed filtering aliases. - *
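With NodeServicesProvider removed, callers now obtain a QueryShardContext from the IndexService directly, passing the shard id, an optional IndexReader, and a LongSupplier for the current time. A hedged usage sketch (`indexService`, `shardRouting` and `searcher` are assumed to be in scope; they are not part of the diff):

    // Reader-aware context: can apply IndexReader-specific rewrites.
    QueryShardContext searchContext = indexService.newQueryShardContext(
        shardRouting.id(), searcher.getIndexReader(), System::currentTimeMillis);

    // Reader-less context: valid for parsing only; per the new javadoc it cannot
    // apply reader-specific optimizations. The diff itself uses this form (shard 0,
    // null reader, throwing time supplier) when parsing percolator queries.
    QueryShardContext parseOnlyContext = indexService.newQueryShardContext(
        0, null, System::currentTimeMillis);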

- * The list of filtering aliases should be obtained by calling MetaData.filteringAliases. - * Returns null if no filtering is required.
    - */ - public Query aliasFilter(QueryShardContext context, String... aliasNames) { - if (aliasNames == null || aliasNames.length == 0) { - return null; - } - final ImmutableOpenMap aliases = indexSettings.getIndexMetaData().getAliases(); - if (aliasNames.length == 1) { - AliasMetaData alias = aliases.get(aliasNames[0]); - if (alias == null) { - // This shouldn't happen unless alias disappeared after filteringAliases was called. - throw new InvalidAliasNameException(index(), aliasNames[0], "Unknown alias name was passed to alias Filter"); - } - return parse(alias, context); - } else { - // we need to bench here a bit, to see maybe it makes sense to use OrFilter - BooleanQuery.Builder combined = new BooleanQuery.Builder(); - for (String aliasName : aliasNames) { - AliasMetaData alias = aliases.get(aliasName); - if (alias == null) { - // This shouldn't happen unless alias disappeared after filteringAliases was called. - throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0], - "Unknown alias name was passed to alias Filter"); - } - Query parsedFilter = parse(alias, context); - if (parsedFilter != null) { - combined.add(parsedFilter, BooleanClause.Occur.SHOULD); - } else { - // The filter might be null only if filter was removed after filteringAliases was called - return null; - } - } - return combined.build(); - } - } - - private Query parse(AliasMetaData alias, QueryShardContext shardContext) { - if (alias.filter() == null) { - return null; - } - try { - byte[] filterSource = alias.filter().uncompressed(); - try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) { - Optional innerQueryBuilder = shardContext.newParseContext(parser).parseInnerQueryBuilder(); - if (innerQueryBuilder.isPresent()) { - return shardContext.toFilter(innerQueryBuilder.get()).query(); - } - return null; - } - } catch (IOException ex) { - throw new AliasFilterParsingException(shardContext.index(), alias.getAlias(), "Invalid alias filter", ex); - } - } - public IndexMetaData getMetaData() { return indexSettings.getIndexMetaData(); } @@ -738,7 +697,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust if (shard.isRefreshNeeded()) { shard.refresh("schedule"); } - } catch (EngineClosedException | AlreadyClosedException ex) { + } catch (IndexShardClosedException | AlreadyClosedException ex) { // fine - continue; } continue; diff --git a/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java index 564988d0594..7cdd869821e 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java +++ b/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardAlreadyExistsException extends ElasticsearchException { public IndexShardAlreadyExistsException(String message) { diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java index 439acb239a3..72068f3d1d2 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -45,8 +45,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -/** - */ public final class IndexWarmer 
extends AbstractComponent { private final List listeners; diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 513e87878d6..215013bf246 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -133,23 +133,20 @@ public final class IndexingSlowLog implements IndexingOperationListener { this.reformat = reformat; } - @Override - public void postIndex(Engine.Index index, boolean created) { - final long took = index.endTime() - index.startTime(); - postIndexing(index.parsedDoc(), took); - } - - - private void postIndexing(ParsedDocument doc, long tookInNanos) { - if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); - } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); - } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); - } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + public void postIndex(Engine.Index indexOperation, Engine.IndexResult result) { + if (result.hasFailure() == false) { + final ParsedDocument doc = indexOperation.parsedDoc(); + final long tookInNanos = result.getTook(); + if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { + indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { + indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { + indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { + indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java index e04d3dc7a49..3707d9259b1 100644 --- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java @@ -54,7 +54,7 @@ public final class MergeSchedulerConfig { public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", - (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), + (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.numberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic, Property.IndexScope); public static final Setting MAX_MERGE_COUNT_SETTING = diff --git a/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java b/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java deleted file 
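The reworked IndexingSlowLog.postIndex above now skips failed operations (result.hasFailure()) and reads the duration from Engine.IndexResult instead of recomputing it. The threshold cascade it keeps can be summarized with this self-contained sketch (the helper is illustrative, not part of the patch; thresholds are in nanoseconds, and a negative value disables a level, as in the diff's `>= 0` guards):

    // Most severe threshold wins; returns null when no enabled threshold is exceeded.
    static String slowLogLevel(long tookInNanos, long warn, long info, long debug, long trace) {
        if (warn >= 0 && tookInNanos > warn) return "WARN";
        if (info >= 0 && tookInNanos > info) return "INFO";
        if (debug >= 0 && tookInNanos > debug) return "DEBUG";
        if (trace >= 0 && tookInNanos > trace) return "TRACE";
        return null; // below every enabled threshold: nothing is logged
    }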
mode 100644 index 866c938c0f5..00000000000 --- a/core/src/main/java/org/elasticsearch/index/NodeServicesProvider.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index; - -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.threadpool.ThreadPool; - -/** - * Simple provider class that holds the Index and Node level services used by - * a shard. - * This is just a temporary solution until we cleaned up index creation and removed injectors on that level as well. - */ -public final class NodeServicesProvider { - - private final ThreadPool threadPool; - private final BigArrays bigArrays; - private final Client client; - private final IndicesQueriesRegistry indicesQueriesRegistry; - private final ScriptService scriptService; - private final CircuitBreakerService circuitBreakerService; - private final ClusterService clusterService; - - @Inject - public NodeServicesProvider(ThreadPool threadPool, BigArrays bigArrays, Client client, ScriptService scriptService, - IndicesQueriesRegistry indicesQueriesRegistry, CircuitBreakerService circuitBreakerService, - ClusterService clusterService) { - this.threadPool = threadPool; - this.bigArrays = bigArrays; - this.client = client; - this.indicesQueriesRegistry = indicesQueriesRegistry; - this.scriptService = scriptService; - this.circuitBreakerService = circuitBreakerService; - this.clusterService = clusterService; - } - - public ThreadPool getThreadPool() { - return threadPool; - } - - public BigArrays getBigArrays() { return bigArrays; } - - public Client getClient() { - return client; - } - - public IndicesQueriesRegistry getIndicesQueriesRegistry() { - return indicesQueriesRegistry; - } - - public ScriptService getScriptService() { - return scriptService; - } - - public CircuitBreakerService getCircuitBreakerService() { - return circuitBreakerService; - } - - public ClusterService getClusterService() { - return clusterService; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/VersionType.java b/core/src/main/java/org/elasticsearch/index/VersionType.java index 062fbce10de..fcbd6690a38 100644 --- a/core/src/main/java/org/elasticsearch/index/VersionType.java +++ b/core/src/main/java/org/elasticsearch/index/VersionType.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.lucene.uid.Versions; import java.io.IOException; -/** - * - */ public enum VersionType implements Writeable { INTERNAL((byte) 0) { 
@Override @@ -198,6 +195,55 @@ public enum VersionType implements Writeable { return version >= 0L || version == Versions.MATCH_ANY; } + }, + /** + * Warning: this version type should be used with care. Concurrent indexing may result in loss of data on replicas. + * + * @deprecated this will be removed in 7.0 and should not be used! It is *ONLY* for backward compatibility with 5.0 indices + */ + @Deprecated + FORCE((byte) 3) { + @Override + public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + if (currentVersion == Versions.NOT_FOUND) { + return false; + } + if (expectedVersion == Versions.MATCH_ANY) { + throw new IllegalStateException("you must specify a version when using VersionType.FORCE"); + } + return false; + } + + @Override + public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) { + throw new AssertionError("VersionType.FORCE should never result in a write conflict"); + } + + @Override + public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) { + return false; + } + + @Override + public String explainConflictForReads(long currentVersion, long expectedVersion) { + throw new AssertionError("VersionType.FORCE should never result in a read conflict"); + } + + @Override + public long updateVersion(long currentVersion, long expectedVersion) { + return expectedVersion; + } + + @Override + public boolean validateVersionForWrites(long version) { + return version >= 0L; + } + + @Override + public boolean validateVersionForReads(long version) { + return version >= 0L || version == Versions.MATCH_ANY; + } + }; private final byte value; @@ -291,6 +337,8 @@ public enum VersionType implements Writeable { return EXTERNAL; } else if ("external_gte".equals(versionType)) { return EXTERNAL_GTE; + } else if ("force".equals(versionType)) { + return FORCE; } throw new IllegalArgumentException("No version type match [" + versionType + "]"); } @@ -309,6 +357,8 @@ public enum VersionType implements Writeable { return EXTERNAL; } else if (value == 2) { return EXTERNAL_GTE; + } else if (value == 3) { + return FORCE; } throw new IllegalArgumentException("No version type match [" + value + "]"); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java index 2e728386ab8..fe4e757a9ae 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AbstractCharFilterFactory.java @@ -22,9 +22,6 @@ package org.elasticsearch.index.analysis; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; -/** - * - */ public abstract class AbstractCharFilterFactory extends AbstractIndexComponent implements CharFilterFactory { private final String name; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java index c0406cb806e..d8c30df9bf4 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AbstractIndexAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; -/** - * - */ public
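The deprecated FORCE version type added above deliberately never reports a conflict and installs the caller's version verbatim, which is why its javadoc warns about data loss on replicas and restricts it to 5.0 backward compatibility. A small sketch of its semantics, grounded in the methods in the hunk (the concrete values are illustrative):

    long currentVersion = 5;   // version already present on the shard
    long expectedVersion = 2;  // version supplied by the write request

    // INTERNAL would reject this write; FORCE accepts it and overwrites the version.
    assert VersionType.FORCE.isVersionConflictForWrites(currentVersion, expectedVersion, false) == false;
    assert VersionType.FORCE.updateVersion(currentVersion, expectedVersion) == 2L;

    // Registered on the wire as byte 3 and under the name "force", per the hunk.
    assert VersionType.fromString("force") == VersionType.FORCE;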
abstract class AbstractIndexAnalyzerProvider extends AbstractIndexComponent implements AnalyzerProvider { private final String name; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java index 16096ca8f3f..b148adbd6ed 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenFilterFactory.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; -/** - * - */ public abstract class AbstractTokenFilterFactory extends AbstractIndexComponent implements TokenFilterFactory { private final String name; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java index dafc4b87730..dfa177a7fbf 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AbstractTokenizerFactory.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; -/** - * - */ public abstract class AbstractTokenizerFactory extends AbstractIndexComponent implements TokenizerFactory { private final String name; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 1d2098d4390..6dddf6eb57f 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -533,7 +532,7 @@ public final class AnalysisRegistry implements Closeable { // TODO: remove alias support completely when we no longer support pre 5.0 indices final String analyzerAliasKey = "index.analysis.analyzer." 
+ analyzerFactory.name() + ".alias"; if (indexSettings.getSettings().get(analyzerAliasKey) != null) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha6)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_beta1)) { // do not allow alias creation if the index was created on or after v5.0 beta1 throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported"); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java index ef49d5c8dac..b31d9851edf 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java @@ -22,9 +22,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.common.inject.Provider; -/** - * - */ public interface AnalyzerProvider extends Provider { String name(); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java index c4795d7a455..4ff06664f76 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java @@ -19,9 +19,6 @@ package org.elasticsearch.index.analysis; -/** - * - */ public enum AnalyzerScope { INDEX, INDICES, diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java index 4b185c450d5..fffa594b9e1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ArabicAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ArabicAnalyzer arabicAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java index 265e050efee..15ed250e00c 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ArabicNormalizationFilterFactory.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ArabicNormalizationFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { public ArabicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java index 91a68386f61..909352aeae8 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ArabicStemTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import
org.elasticsearch.index.IndexSettings; -/** - * - */ public class ArabicStemTokenFilterFactory extends AbstractTokenFilterFactory { public ArabicStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java index b58b8a87886..c53d9da8676 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ArmenianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ArmenianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java index 17f601084b8..cd91cb7522c 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/BasqueAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BasqueAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java index 872d1fb708a..c2640390f2d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BrazilianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java index efcc1bfba90..abbd89b3a14 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/BrazilianStemTokenFilterFactory.java @@ -26,9 +26,7 @@ import org.apache.lucene.analysis.CharArraySet; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ + public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet exclusions; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java index 4eddc84d310..b1017a1ee7d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/BulgarianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final BulgarianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java index cd03649febc..25328421f99 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CatalanAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final CatalanAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java index b76af08a0d1..68692c89469 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java @@ -21,9 +21,6 @@ package org.elasticsearch.index.analysis; import java.io.Reader; -/** - * - */ public interface CharFilterFactory { String name(); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java index 1728a4a3f7e..dd7f61f706c 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CjkAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final CJKAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java index da1ca022685..eb6dae82061 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java @@ -27,9 +27,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet words; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java index b68a321359e..6185f358568 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzer.java @@ -25,9 +25,6 @@ import org.apache.lucene.analysis.Tokenizer; import java.io.Reader; -/** - * - */ public final class CustomAnalyzer extends Analyzer { private final TokenizerFactory tokenizerFactory; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java index f1487d198b5..f93e9e16e5e 100644 --- 
a/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CzechAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final CzechAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java index 041ca52c428..22e4dc07d0d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/DanishAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final DanishAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java index d620e058e3e..12d0a041bd5 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/DelimitedPayloadTokenFilterFactory.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class DelimitedPayloadTokenFilterFactory extends AbstractTokenFilterFactory { public static final char DEFAULT_DELIMITER = '|'; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java index 1c33131624e..c747a9a5a3d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/DutchAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final DutchAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java index daa67b00d34..9a486227a22 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/DutchStemTokenFilterFactory.java @@ -28,9 +28,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.tartarus.snowball.ext.DutchStemmer; -/** - * - */ public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet exclusions; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java index 82ed526323d..9d287d90c83 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java +++ 
b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java @@ -28,9 +28,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { private final int minGram; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java index 77d122393ce..cb696219f4e 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java @@ -28,9 +28,6 @@ import org.elasticsearch.index.IndexSettings; import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars; -/** - * - */ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { private final int minGram; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java index 0b94fb301a1..0859824370b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { private final CharArraySet articles; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java index bcb7889253d..d124f27db57 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/EnglishAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final EnglishAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java index 34829cedeed..dbb355ea51c 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/FieldNameAnalyzer.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.collect.CopyOnWriteHashMap; import java.util.Map; -/** - * - */ public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper { private final Map analyzers; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FingerprintTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/FingerprintTokenFilterFactory.java index 9d41044abf7..55623e8f831 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/FingerprintTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/FingerprintTokenFilterFactory.java @@ -27,9 +27,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class FingerprintTokenFilterFactory extends 
AbstractTokenFilterFactory { private final char separator; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java index 3a2b3292480..5f728619f9e 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/FinnishAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final FinnishAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java index ff848dc681b..f312161819d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/FrenchAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final FrenchAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java index e24dc86a22e..1a6ca73792e 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/FrenchStemTokenFilterFactory.java @@ -28,9 +28,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.tartarus.snowball.ext.FrenchStemmer; -/** - * - */ public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet exclusions; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java index 6f6521f52f7..646fa9fa681 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/GalicianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final GalicianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java index a55df0f1b8a..c11ac7eb9ba 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/GermanAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final GermanAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java 
b/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java index 72e66c29dfd..eedb59e9141 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/GermanStemTokenFilterFactory.java @@ -27,9 +27,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet exclusions; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java index 4550af52cec..625acddfadc 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final GreekAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java index 85e08764dc3..123117abced 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final HindiAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java index f5a09f2ce3a..99268ee7ee9 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final HungarianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java index b4c41a3ce3d..60f40612cd0 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final IndonesianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java index fd2246a1d6f..38f7094bdeb 100644 --- 
a/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ItalianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ItalianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java index 0bf134cb380..a0982a0a8a2 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/KeywordAnalyzerProvider.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final KeywordAnalyzer keywordAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java index ac44f106f51..a3707d9e44a 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class KeywordTokenizerFactory extends AbstractTokenizerFactory { private final int bufferSize; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java index 757c6d2f4e2..a288747ab4b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final LatvianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java index e55e24ccae0..8a03802a7dd 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory { private final int min; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java index bba9c424661..364c2367623 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java @@ -25,9 +25,6 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class LetterTokenizerFactory extends AbstractTokenizerFactory { public LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java index b3e3e8a19ca..ddf60911187 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class LimitTokenCountFilterFactory extends AbstractTokenFilterFactory { public static final int DEFAULT_MAX_TOKEN_COUNT = 1; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java index 961307f7015..16939f0d153 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent { public LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java index 0905b310735..7926f585bc3 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java @@ -26,9 +26,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { private final int minGram; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java index 424aa04e548..17acddf55e1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java @@ -33,9 +33,6 @@ import java.util.Map; import static java.util.Collections.unmodifiableMap; -/** - * - */ public class NGramTokenizerFactory extends AbstractTokenizerFactory { private final int minGram; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index 1dd562c4bb1..416967e94f5 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -39,10 +39,6 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { this(analyzer.name(), analyzer.scope(), analyzer.analyzer(), positionIncrementGap); 
} - public NamedAnalyzer(String name, Analyzer analyzer) { - this(name, AnalyzerScope.INDEX, analyzer); - } - public NamedAnalyzer(String name, AnalyzerScope scope, Analyzer analyzer) { this(name, scope, analyzer, Integer.MIN_VALUE); } @@ -119,4 +115,12 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { public int hashCode() { return Objects.hash(name); } + + @Override + public void close() { + super.close(); + if (scope == AnalyzerScope.INDEX) { + analyzer.close(); + } + } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java index fb0b8e36cf8..65a7dff331f 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final NorwegianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java index c96d26676a4..b3bbb872f2a 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java @@ -30,9 +30,6 @@ import org.elasticsearch.index.IndexSettings; import java.util.regex.Pattern; -/** - * - */ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final PatternAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java index 66bebe579ac..2c5b427e51a 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final PersianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java index 4f5751d985d..1a9644d611f 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PersianNormalizationFilterFactory.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class PersianNormalizationFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { public PersianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java index 8bb205f9646..82d3d7633a8 100644 --- 
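The NamedAnalyzer hunk above pairs two changes: the convenience constructor that silently defaulted the scope to INDEX is removed, and a close() override now releases the wrapped analyzer only when the wrapper actually owns it. A minimal standalone sketch of that ownership rule, using plain java.io stand-ins rather than the real Lucene/Elasticsearch types:

```java
import java.io.Closeable;
import java.io.IOException;

// Simplified stand-ins for AnalyzerScope and NamedAnalyzer; only the
// close() ownership rule from the hunk above is modeled here.
enum Scope { INDEX, GLOBAL }

class NamedCloseable implements Closeable {
    private final Scope scope;
    private final Closeable wrapped;

    NamedCloseable(Scope scope, Closeable wrapped) {
        this.scope = scope;
        this.wrapped = wrapped;
    }

    @Override
    public void close() throws IOException {
        // INDEX-scoped delegates belong to a single index and are released
        // with the wrapper; GLOBAL delegates are shared across indices, so
        // closing them here would break every other user.
        if (scope == Scope.INDEX) {
            wrapped.close();
        }
    }
}
```

Removing the two-argument constructor fits the same rule: callers must now state the scope explicitly instead of implicitly handing ownership of the delegate to the wrapper.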
a/core/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PorterStemTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class PorterStemTokenFilterFactory extends AbstractTokenFilterFactory { public PorterStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java index 919bdd933fe..1d5f0561e24 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final PortugueseAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java index af87d090de4..a6f0c132ba8 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProvider.java @@ -21,9 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; -/** - * - */ public class PreBuiltAnalyzerProvider implements AnalyzerProvider { private final NamedAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index 786e24a0844..3e59377ecc2 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -29,9 +29,6 @@ import org.elasticsearch.indices.analysis.PreBuiltAnalyzers; import java.io.IOException; -/** - * - */ public class PreBuiltAnalyzerProviderFactory implements AnalysisModule.AnalysisProvider> { private final PreBuiltAnalyzerProvider analyzerProvider; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java index 8fad0a14c7f..1719841098d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ReverseTokenFilterFactory extends AbstractTokenFilterFactory { public ReverseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java index 9e08c638e5d..1ba780bf4cb 100644 --- 
a/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final RomanianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java index 9478b7ff232..f7187cfb490 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final RussianAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java index 12172497879..da38ed14176 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/RussianStemTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class RussianStemTokenFilterFactory extends AbstractTokenFilterFactory { public RussianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SerbianNormalizationFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/SerbianNormalizationFilterFactory.java index 8fc6052247c..d839a822cab 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SerbianNormalizationFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SerbianNormalizationFilterFactory.java @@ -24,9 +24,7 @@ import org.apache.lucene.analysis.sr.SerbianNormalizationFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ + public class SerbianNormalizationFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { public SerbianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java index 0c6ae7b9652..453d7bad896 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory { private final Factory factory; diff --git 
a/core/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java index faa23cfcd35..6a7d530402f 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SimpleAnalyzerProvider.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class SimpleAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final SimpleAnalyzer simpleAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java index ff95272e816..23f1b734469 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SpanishAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class SpanishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final SpanishAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java index 2af7b5bbabf..a4ee6457b52 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardAnalyzerProvider.java @@ -27,9 +27,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StandardAnalyzer standardAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java index b9e25346b25..edc837293f9 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StandardHtmlStripAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java index 2c6062b93f4..2339815b558 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java @@ -26,9 +26,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class StandardTokenFilterFactory extends AbstractTokenFilterFactory { public StandardTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git 
a/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java index 3f142a1ab43..ed8d2b452c2 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - */ public class StandardTokenizerFactory extends AbstractTokenizerFactory { private final int maxTokenLength; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java index 317b3e07850..bf83876259b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java @@ -82,8 +82,6 @@ import org.tartarus.snowball.ext.SpanishStemmer; import org.tartarus.snowball.ext.SwedishStemmer; import org.tartarus.snowball.ext.TurkishStemmer; -/** - */ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { private String language; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java index aca1fda299f..8c969238b47 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StopAnalyzer stopAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java index 4e7c3ae1af8..86650e81915 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java @@ -31,9 +31,6 @@ import org.elasticsearch.index.IndexSettings; import java.util.Set; -/** - * - */ public class StopTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet stopWords; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java index bbc14f474bb..b77e19efcaa 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class SwedishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final SwedishAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java index cf4b9dbdb1e..f1a69c62b1e 100644 --- 
a/core/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class ThaiAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ThaiAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java index 3d4866632cb..8c976646b85 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java @@ -21,9 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; -/** - * - */ public interface TokenFilterFactory { String name(); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java index f81ac97ba90..6ca9d457cbc 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java @@ -21,9 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; -/** - * - */ public interface TokenizerFactory { String name(); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java index c77467b2b41..4239f2444bc 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { private static final String UPDATE_OFFSETS_KEY = "update_offsets"; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java index 0a5a30cc28f..49ea7d6940d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class TruncateTokenFilterFactory extends AbstractTokenFilterFactory { private final int length; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java index 368dcbe3abd..2b2696a362b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final 
TurkishAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java index 3e75d214bd3..79eb0c604d9 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory { private final int maxTokenLength; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java index eec70134c3f..986153a4577 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory { private final boolean onlyOnSamePosition; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java index c2074cb9393..551345fc2e1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class UpperCaseTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { public UpperCaseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java index c74c7a88dd6..5681a499f66 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceAnalyzerProvider.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class WhitespaceAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final WhitespaceAnalyzer analyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java index 5263523314d..aa5acc8a985 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - * - */ public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory 
{ public WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 61733f24695..3e853076ca4 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -28,9 +28,6 @@ import org.elasticsearch.index.cache.query.QueryCache; import java.io.Closeable; import java.io.IOException; -/** - * - */ public class IndexCache extends AbstractIndexComponent implements Closeable { private final QueryCache queryCache; diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/ShardBitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/ShardBitsetFilterCache.java index 86a00fdbcf7..050c016118f 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/ShardBitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/ShardBitsetFilterCache.java @@ -24,8 +24,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; -/** - */ public class ShardBitsetFilterCache extends AbstractIndexShardComponent { private final CounterMetric totalMetric = new CounterMetric(); diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/DisabledQueryCache.java b/core/src/main/java/org/elasticsearch/index/cache/query/DisabledQueryCache.java index 730dc63b939..df5158b6d7a 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/DisabledQueryCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/DisabledQueryCache.java @@ -25,9 +25,6 @@ import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.query.QueryCache; -/** - * - */ public class DisabledQueryCache extends AbstractIndexComponent implements QueryCache { public DisabledQueryCache(IndexSettings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java index bee947b54f0..318059d6a3f 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java @@ -23,9 +23,6 @@ import org.elasticsearch.index.IndexComponent; import java.io.Closeable; -/** - * - */ public interface QueryCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache { static class EntriesStats { diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java index fcbb6190048..33b61a35138 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java @@ -29,8 +29,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class QueryCacheStats implements Streamable, ToXContent { long ramBytesUsed; diff --git a/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java b/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java index 5eb9775b816..e444238ef75 100644 --- 
a/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java +++ b/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class RequestCacheStats implements Streamable, ToXContent { long memorySize; diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java index 068df25e2fd..8cad7823cb4 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java @@ -25,14 +25,12 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; /** - * + * Deprecated as not used in 6.0, should be removed in 7.0 + * Still exists for bwc in serializing/deserializing from + * 5.x nodes */ +@Deprecated public class DeleteFailedEngineException extends EngineException { - - public DeleteFailedEngineException(ShardId shardId, Engine.Delete delete, Throwable cause) { - super(shardId, "Delete failed for [" + delete.uid().text() + "]", cause); - } - public DeleteFailedEngineException(StreamInput in) throws IOException{ super(in); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index baacc4b240d..b145a86e43d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -48,4 +48,12 @@ class DeleteVersionValue extends VersionValue { public long ramBytesUsed() { return BASE_RAM_BYTES_USED; } + + @Override + public String toString() { + return "DeleteVersionValue{" + + "version=" + version() + ", " + + "time=" + time + + '}'; + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java b/core/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java index 1f6b0cd501b..58e06e50a46 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DocumentMissingException.java @@ -24,9 +24,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class DocumentMissingException extends EngineException { public DocumentMissingException(ShardId shardId, String type, String id) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java b/core/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java index b0010162f4a..4fa53ed5a1e 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DocumentSourceMissingException.java @@ -24,9 +24,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class DocumentSourceMissingException extends EngineException { public DocumentSourceMissingException(ShardId shardId, String type, String id) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 56fe8dc9061..b63439f8a12 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ 
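The DeleteFailedEngineException hunk above (like the matching IndexFailedEngineException hunk further down) shows the wire-compatibility deprecation pattern: the ordinary constructors are deleted so new code can no longer raise the exception, while the StreamInput constructor survives so exceptions serialized by 5.x nodes can still be deserialized. A hedged, self-contained sketch of the same idea, with java.io streams standing in for Elasticsearch's StreamInput/StreamOutput:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the bwc-deprecation pattern: no construction path is left
// except the stream constructor, so the type can still be read off the
// wire but never newly created.
@Deprecated
class LegacyWireException extends RuntimeException {
    LegacyWireException(DataInputStream in) throws IOException {
        super(in.readUTF()); // read whatever the old node wrote
    }
}

class WireDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new DataOutputStream(bytes).writeUTF("Delete failed for [1]");
        LegacyWireException e = new LegacyWireException(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(e.getMessage()); // Delete failed for [1]
    }
}
```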
b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -79,6 +79,7 @@ import java.util.Base64; import java.util.Comparator; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -279,9 +280,146 @@ public abstract class Engine implements Closeable { } } - public abstract void index(Index operation) throws EngineException; + /** + * Perform document index operation on the engine + * @param index operation to perform + * @return {@link IndexResult} containing updated translog location, version and + * document specific failures + * + * Note: engine level failures (i.e. persistent engine failures) are thrown + */ + public abstract IndexResult index(final Index index); - public abstract void delete(Delete delete) throws EngineException; + /** + * Perform document delete operation on the engine + * @param delete operation to perform + * @return {@link DeleteResult} containing updated translog location, version and + * document specific failures + * + * Note: engine level failures (i.e. persistent engine failures) are thrown + */ + public abstract DeleteResult delete(final Delete delete); + + /** + * Base class for index and delete operation results + * Holds result meta data (e.g. translog location, updated version) + * for an executed write {@link Operation} + **/ + public abstract static class Result { + private final Operation.TYPE operationType; + private final long version; + private final long seqNo; + private final Exception failure; + private final SetOnce freeze = new SetOnce<>(); + private Translog.Location translogLocation; + private long took; + + protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) { + this.operationType = operationType; + this.failure = failure; + this.version = version; + this.seqNo = seqNo; + } + + protected Result(Operation.TYPE operationType, long version, long seqNo) { + this(operationType, null, version, seqNo); + } + + /** whether the operation had failure */ + public boolean hasFailure() { + return failure != null; + } + + /** get the updated document version */ + public long getVersion() { + return version; + } + + /** + * Get the sequence number on the primary. 
+ * + * @return the sequence number + */ + public long getSeqNo() { + return seqNo; + } + + /** get the translog location after executing the operation */ + public Translog.Location getTranslogLocation() { + return translogLocation; + } + + /** get document failure while executing the operation {@code null} in case of no failure */ + public Exception getFailure() { + return failure; + } + + /** get total time in nanoseconds */ + public long getTook() { + return took; + } + + public Operation.TYPE getOperationType() { + return operationType; + } + + void setTranslogLocation(Translog.Location translogLocation) { + if (freeze.get() == null) { + assert failure == null : "failure has to be null to set translog location"; + this.translogLocation = translogLocation; + } else { + throw new IllegalStateException("result is already frozen"); + } + } + + void setTook(long took) { + if (freeze.get() == null) { + this.took = took; + } else { + throw new IllegalStateException("result is already frozen"); + } + } + + void freeze() { + freeze.set(true); + } + } + + public static class IndexResult extends Result { + private final boolean created; + + public IndexResult(long version, long seqNo, boolean created) { + super(Operation.TYPE.INDEX, version, seqNo); + this.created = created; + } + + public IndexResult(Exception failure, long version, long seqNo) { + super(Operation.TYPE.INDEX, failure, version, seqNo); + this.created = false; + } + + public boolean isCreated() { + return created; + } + } + + public static class DeleteResult extends Result { + private final boolean found; + + public DeleteResult(long version, long seqNo, boolean found) { + super(Operation.TYPE.DELETE, version, seqNo); + this.found = found; + } + + public DeleteResult(Exception failure, long version, long seqNo) { + super(Operation.TYPE.DELETE, failure, version, seqNo); + this.found = false; + } + + public boolean isFound() { + return found; + } + } /** * Attempts to do a special commit where the given syncID is put into the commit data. 
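The Result base class above is deliberately write-once: the engine fills in the translog location and timing after the result object is constructed, then calls freeze(), after which any further mutation throws. A self-contained sketch of that pattern, with a plain AtomicBoolean standing in for the Lucene SetOnce used by the real class:

```java
import java.util.concurrent.atomic.AtomicBoolean;

// Write-once result sketch: setters work until freeze(), then throw,
// mirroring Result.setTook/setTranslogLocation/freeze above.
final class WriteResult {
    private final AtomicBoolean frozen = new AtomicBoolean(false);
    private long tookNanos;

    void setTook(long tookNanos) {
        if (frozen.get()) {
            throw new IllegalStateException("result is already frozen");
        }
        this.tookNanos = tookNanos;
    }

    void freeze() {
        frozen.set(true);
    }

    long getTook() {
        return tookNanos;
    }

    public static void main(String[] args) {
        WriteResult r = new WriteResult();
        r.setTook(42L);    // allowed: not yet frozen
        r.freeze();
        // r.setTook(7L);  // would now throw IllegalStateException
        System.out.println(r.getTook()); // 42
    }
}
```

Freezing matters because the result escapes the engine: once a result is handed back, nothing downstream should be able to alter the recorded timing or translog location.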
The attempt @@ -773,19 +911,33 @@ public abstract class Engine implements Closeable { } public abstract static class Operation { + + /** type of operation (index, delete), subclasses use static types */ + public enum TYPE { + INDEX, DELETE; + + private final String lowercase; + + TYPE() { + this.lowercase = this.toString().toLowerCase(Locale.ROOT); + } + + public String getLowercase() { + return lowercase; + } + } + private final Term uid; - private long version; - private long seqNo; + private final long version; + private final long seqNo; private final VersionType versionType; private final Origin origin; - private Translog.Location location; private final long startTime; - private long endTime; public Operation(Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime) { this.uid = uid; assert origin != Origin.PRIMARY || seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "seqNo should not be set when origin is PRIMARY"; - assert origin == Origin.PRIMARY || seqNo >= 0 : "seqNo should be set when origin is not PRIMARY"; + assert origin == Origin.PRIMARY || seqNo >= 0 : "seqNo should be set when origin is not PRIMARY"; this.seqNo = seqNo; this.version = version; this.versionType = versionType; @@ -816,35 +968,11 @@ public abstract class Engine implements Closeable { return this.version; } - public void updateVersion(long version) { - this.version = version; - } - public long seqNo() { return seqNo; } - public void updateSeqNo(long seqNo) { - this.seqNo = seqNo; - } - - public void setTranslogLocation(Translog.Location location) { - this.location = location; - } - - public Translog.Location getTranslogLocation() { - return this.location; - } - - public int sizeInBytes() { - if (location != null) { - return location.size; - } else { - return estimatedSizeInBytes(); - } - } - - protected abstract int estimatedSizeInBytes(); + public abstract int estimatedSizeInBytes(); public VersionType versionType() { return this.versionType; @@ -857,20 +985,11 @@ public abstract class Engine implements Closeable { return this.startTime; } - public void endTime(long endTime) { - this.endTime = endTime; - } - - /** - * Returns operation end time in nanoseconds. 
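The new Operation.TYPE enum above caches its lowercase name once in the constructor (it is consumed further down, where checkIfDocumentFailureOrThrow passes operation.operationType().getLowercase() to maybeFailEngine) rather than lowercasing on every call, and it pins the conversion to Locale.ROOT. A standalone sketch of the same idiom:

```java
import java.util.Locale;

// Enum mirroring Operation.TYPE above: the lowercase form is computed
// exactly once per constant. Locale.ROOT keeps the result stable under
// any JVM default locale (e.g. a Turkish locale would otherwise turn
// "INDEX" into "ındex" via the dotless i).
enum OpType {
    INDEX, DELETE;

    private final String lowercase;

    OpType() {
        // name() is already assigned when an enum constructor body runs,
        // so calling toString() here is safe
        this.lowercase = this.toString().toLowerCase(Locale.ROOT);
    }

    public String getLowercase() {
        return lowercase;
    }

    public static void main(String[] args) {
        System.out.println(OpType.INDEX.getLowercase());  // index
        System.out.println(OpType.DELETE.getLowercase()); // delete
    }
}
```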
- */ - public long endTime() { - return this.endTime; - } - - abstract String type(); + public abstract String type(); abstract String id(); + + abstract TYPE operationType(); } public static class Index extends Operation { @@ -878,7 +997,6 @@ public abstract class Engine implements Closeable { private final ParsedDocument doc; private final long autoGeneratedIdTimestamp; private final boolean isRetry; - private boolean created; public Index(Term uid, ParsedDocument doc, long seqNo, long version, VersionType versionType, Origin origin, long startTime, long autoGeneratedIdTimestamp, boolean isRetry) { @@ -910,6 +1028,11 @@ public abstract class Engine implements Closeable { return this.doc.id(); } + @Override + TYPE operationType() { + return TYPE.INDEX; + } + public String routing() { return this.doc.routing(); } @@ -922,18 +1045,6 @@ public abstract class Engine implements Closeable { return this.doc.ttl(); } - @Override - public void updateVersion(long version) { - super.updateVersion(version); - this.doc.version().setLongValue(version); - } - - @Override - public void updateSeqNo(long seqNo) { - super.updateSeqNo(seqNo); - this.doc.seqNo().setLongValue(seqNo); - } - public String parent() { return this.doc.parent(); } @@ -946,16 +1057,8 @@ public abstract class Engine implements Closeable { return this.doc.source(); } - public boolean isCreated() { - return created; - } - - public void setCreated(boolean created) { - this.created = created; - } - @Override - protected int estimatedSizeInBytes() { + public int estimatedSizeInBytes() { return (id().length() + type().length()) * 2 + source().length() + 12; } @@ -982,17 +1085,19 @@ public abstract class Engine implements Closeable { private final String type; private final String id; - private boolean found; - public Delete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime, boolean found) { + public Delete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime) { super(uid, seqNo, version, versionType, origin, startTime); this.type = type; this.id = id; - this.found = found; } public Delete(String type, String id, Term uid) { - this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), false); + this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + } + + public Delete(Delete template, VersionType versionType) { + this(template.type(), template.id(), template.uid(), template.seqNo(), template.version(), versionType, template.origin(), template.startTime()); } @Override @@ -1005,20 +1110,15 @@ public abstract class Engine implements Closeable { return this.id; } - public void updateVersion(long version, boolean found) { - updateVersion(version); - this.found = found; - } - - public boolean found() { - return this.found; + @Override + TYPE operationType() { + return TYPE.DELETE; } @Override - protected int estimatedSizeInBytes() { + public int estimatedSizeInBytes() { return (uid().field().length() + uid().text().length()) * 2 + 20; } - } public static class Get { diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineException.java b/core/src/main/java/org/elasticsearch/index/engine/EngineException.java index 23f6be7ffd2..4d53deb8ee1 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineException.java +++ 
b/core/src/main/java/org/elasticsearch/index/engine/EngineException.java @@ -25,9 +25,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class EngineException extends ElasticsearchException { public EngineException(ShardId shardId, String msg, Object... params) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java index 016e01c9c37..94aacdc3eae 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/FlushFailedEngineException.java @@ -24,9 +24,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class FlushFailedEngineException extends EngineException { public FlushFailedEngineException(ShardId shardId, Throwable t) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java index 4728b7f899a..cd5e8a47406 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java @@ -27,22 +27,17 @@ import java.io.IOException; import java.util.Objects; /** - * + * Deprecated as not used in 6.0, should be removed in 7.0 + * Still exists for bwc in serializing/deserializing from + * 5.x nodes */ +@Deprecated public class IndexFailedEngineException extends EngineException { private final String type; private final String id; - public IndexFailedEngineException(ShardId shardId, String type, String id, Throwable cause) { - super(shardId, "Index failed for [" + type + "#" + id + "]", cause); - Objects.requireNonNull(type, "type must not be null"); - Objects.requireNonNull(id, "id must not be null"); - this.type = type; - this.id = id; - } - public IndexFailedEngineException(StreamInput in) throws IOException{ super(in); type = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index cdecb35ae25..d9f32e40c95 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -35,6 +35,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -58,6 +59,7 @@ import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; @@ -133,7 +135,7 @@ public class InternalEngine extends Engine { public InternalEngine(EngineConfig engineConfig) throws EngineException { super(engineConfig); openMode = engineConfig.getOpenMode(); - if 
(engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha6)) { + if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_beta1)) { // no optimization for pre 5.0.0-beta1 since translog might not have all information needed maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE); } else { @@ -413,6 +415,16 @@ public class InternalEngine extends Engine { final long currentVersion, final long expectedVersion, final boolean deleted) { + if (op.versionType() == VersionType.FORCE) { + if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) { + // If index was created in 6.0 or later, 'force' is not allowed at all + throw new IllegalArgumentException("version type [FORCE] may not be used for indices created after 6.0"); + } else if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + // For earlier indices, 'force' is only allowed for translog recovery + throw new IllegalArgumentException("version type [FORCE] may not be used for non-translog operations"); + } + } + if (op.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) { if (op.origin().isRecovery()) { // version conflict, but okay @@ -436,52 +448,61 @@ public class InternalEngine extends Engine { return currentVersion; } - private void maybeUpdateSequenceNumber(Engine.Operation op) { - if (op.origin() == Operation.Origin.PRIMARY) { - op.updateSeqNo(seqNoService.generateSeqNo()); - } - } - - private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u); - - @FunctionalInterface - private interface VersionValueSupplier { - VersionValue apply(long updatedVersion, long time); - } - - private void maybeAddToTranslog( - final T op, - final long updatedVersion, - final Function toTranslogOp, - final VersionValueSupplier toVersionValue) throws IOException { - if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { - final Translog.Location translogLocation = translog.add(toTranslogOp.apply(op)); - op.setTranslogLocation(translogLocation); - } - versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); - - } - @Override - public void index(Index index) { + public IndexResult index(Index index) { + IndexResult result; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (index.origin().isRecovery()) { // Don't throttle recovery operations - innerIndex(index); + result = innerIndex(index); } else { try (Releasable r = throttle.acquireThrottle()) { - innerIndex(index); + result = innerIndex(index); } } - } catch (IllegalStateException | IOException e) { - try { - maybeFailEngine("index", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new IndexFailedEngineException(shardId, index.type(), index.id(), e); + } catch (Exception e) { + result = new IndexResult(checkIfDocumentFailureOrThrow(index, e), index.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); } + return result; + } + + /** + * Inspects the exception thrown when executing index or delete operations + * + * @return failure if the failure is a document specific failure (e.g. analysis chain failure) + * or throws Exception if the failure caused the engine to fail (e.g. 
out of disk, lucene tragic event) + * + * Note: pkg-private for testing + */ + final Exception checkIfDocumentFailureOrThrow(final Operation operation, final Exception failure) { + boolean isDocumentFailure; + try { + // When indexing a document into Lucene, Lucene distinguishes between environment related errors + // (like out of disk space) and document specific errors (like analysis chain problems) by setting + // the IndexWriter.getTragicEvent() value for the former. maybeFailEngine checks for these kind of + // errors and returns true if that is the case. We use that to indicate a document level failure + // and set the error in operation.setFailure. In case of environment related errors, the failure + // is bubbled up + isDocumentFailure = maybeFailEngine(operation.operationType().getLowercase(), failure) == false; + } catch (Exception inner) { + // we failed checking whether the failure can fail the engine, treat it as a persistent engine failure + isDocumentFailure = false; + failure.addSuppressed(inner); + } + if (isDocumentFailure) { + return failure; + } else { + // throw original exception in case the exception caused the engine to fail + rethrow(failure); + return null; + } + } + + // hack to rethrow original exception in case of engine level failures during index/delete operation + @SuppressWarnings("unchecked") + private static void rethrow(Throwable t) throws T { + throw (T) t; } private boolean canOptimizeAddDocument(Index index) { @@ -508,7 +529,10 @@ public class InternalEngine extends Engine { return false; } - private void innerIndex(Index index) throws IOException { + private IndexResult innerIndex(Index index) throws IOException { + final Translog.Location location; + final long updatedVersion; + IndexResult indexResult = null; try (Releasable ignored = acquireLock(index.uid())) { lastWriteNanos = index.startTime(); /* if we have an autoGeneratedID that comes into the engine we can potentially optimize @@ -540,7 +564,8 @@ public class InternalEngine extends Engine { // if anything is fishy here ie. 
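The rethrow helper above is the classic generic "sneaky throw": T is inferred at the call site and erased at runtime, so the unchecked cast never actually executes and any Throwable, checked or not, escapes without wrapping and without a throws clause. A minimal runnable demonstration of the trick in isolation:

```java
import java.io.IOException;

// Unchecked-rethrow idiom: erasure makes the (T) cast a no-op at
// runtime, so the checked IOException propagates unchanged.
final class Sneaky {
    @SuppressWarnings("unchecked")
    static <T extends Throwable> void rethrow(Throwable t) throws T {
        throw (T) t;
    }

    public static void main(String[] args) {
        try {
            // T infers as RuntimeException, so the compiler demands no
            // throws clause even though a checked IOException is thrown.
            Sneaky.<RuntimeException>rethrow(new IOException("boom"));
        } catch (Throwable t) {
            System.out.println(t); // java.io.IOException: boom, unwrapped
        }
    }
}
```

In the engine this keeps the document-failure path (return the exception on the result) cleanly separated from the engine-failure path (rethrow the original exception) without forcing callers to unwrap a wrapper exception.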
there is a retry we go and force updateDocument below so we are updating the document in the // lucene index without checking the version map but we still do the version check final boolean forceUpdateDocument; - if (canOptimizeAddDocument(index)) { + final boolean canOptimizeAddDocument = canOptimizeAddDocument(index); + if (canOptimizeAddDocument) { long deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get(); if (index.isRetry()) { forceUpdateDocument = true; @@ -572,67 +597,91 @@ } } final long expectedVersion = index.version(); - if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) { - index.setCreated(false); - return; - } - maybeUpdateSequenceNumber(index); - final long updatedVersion = updateVersion(index, currentVersion, expectedVersion); - index.setCreated(deleted); - if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) { - // document does not exists, we can optimize for create - index(index, indexWriter); + if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) { + // skip index operation because of version conflict on recovery + indexResult = new IndexResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, false); } else { - update(index, indexWriter); + final long seqNo; + if (index.origin() == Operation.Origin.PRIMARY) { + seqNo = seqNoService.generateSeqNo(); + } else { + seqNo = index.seqNo(); + } + updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion); + index.parsedDoc().version().setLongValue(updatedVersion); + if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) { + // document does not exist, we can optimize for create, but double check if assertions are running + assert assertDocDoesNotExist(index, canOptimizeAddDocument == false); + index(index.docs(), indexWriter); + } else { + update(index.uid(), index.docs(), indexWriter); + } + indexResult = new IndexResult(updatedVersion, seqNo, deleted); + location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY + ? 
translog.add(new Translog.Index(index, indexResult)) + : null; + versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion)); + indexResult.setTranslogLocation(location); } - maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE); + indexResult.setTook(System.nanoTime() - index.startTime()); + indexResult.freeze(); + return indexResult; } finally { - if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { - seqNoService.markSeqNoAsCompleted(index.seqNo()); + if (indexResult != null && indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + seqNoService.markSeqNoAsCompleted(indexResult.getSeqNo()); } } } - private long updateVersion(Engine.Operation op, long currentVersion, long expectedVersion) { - final long updatedVersion = op.versionType().updateVersion(currentVersion, expectedVersion); - op.updateVersion(updatedVersion); - return updatedVersion; - } - - private static void index(final Index index, final IndexWriter indexWriter) throws IOException { - if (index.docs().size() > 1) { - indexWriter.addDocuments(index.docs()); + private static void index(final List docs, final IndexWriter indexWriter) throws IOException { + if (docs.size() > 1) { + indexWriter.addDocuments(docs); } else { - indexWriter.addDocument(index.docs().get(0)); + indexWriter.addDocument(docs.get(0)); } } - private static void update(final Index index, final IndexWriter indexWriter) throws IOException { - if (index.docs().size() > 1) { - indexWriter.updateDocuments(index.uid(), index.docs()); + /** + * Asserts that the doc in the index operation really doesn't exist + */ + private boolean assertDocDoesNotExist(final Index index, final boolean allowDeleted) throws IOException { + final VersionValue versionValue = versionMap.getUnderLock(index.uid()); + if (versionValue != null) { + if (versionValue.delete() == false || allowDeleted == false) { + throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + versionValue + ")"); + } } else { - indexWriter.updateDocument(index.uid(), index.docs().get(0)); + try (final Searcher searcher = acquireSearcher("assert doc doesn't exist")) { + final long docsWithId = searcher.searcher().count(new TermQuery(index.uid())); + if (docsWithId > 0) { + throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + "] times in index"); + } + } + } + return true; + } + + private static void update(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + if (docs.size() > 1) { + indexWriter.updateDocuments(uid, docs); + } else { + indexWriter.updateDocument(uid, docs.get(0)); } } @Override - public void delete(Delete delete) throws EngineException { + public DeleteResult delete(Delete delete) { + DeleteResult result; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: - innerDelete(delete); - } catch (IllegalStateException | IOException e) { - try { - maybeFailEngine("delete", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new DeleteFailedEngineException(shardId, delete, e); + result = innerDelete(delete); + } catch (Exception e) { + result = new DeleteResult(checkIfDocumentFailureOrThrow(delete, e), delete.version(), delete.seqNo()); } - maybePruneDeletedTombstones(); + return result; } private void maybePruneDeletedTombstones() { @@ -643,7 +692,11 @@ public class 
InternalEngine extends Engine { } } - private void innerDelete(Delete delete) throws IOException { + private DeleteResult innerDelete(Delete delete) throws IOException { + final Translog.Location location; + final long updatedVersion; + final boolean found; + DeleteResult deleteResult = null; try (Releasable ignored = acquireLock(delete.uid())) { lastWriteNanos = delete.startTime(); final long currentVersion; @@ -659,22 +712,37 @@ public class InternalEngine extends Engine { } final long expectedVersion = delete.version(); - if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) return; - - maybeUpdateSequenceNumber(delete); - final long updatedVersion = updateVersion(delete, currentVersion, expectedVersion); - final boolean found = deleteIfFound(delete, currentVersion, deleted, versionValue); - delete.updateVersion(updatedVersion, found); - - maybeAddToTranslog(delete, updatedVersion, Translog.Delete::new, DeleteVersionValue::new); + if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) { + // skip executing delete because of version conflict on recovery + deleteResult = new DeleteResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, true); + } else { + final long seqNo; + if (delete.origin() == Operation.Origin.PRIMARY) { + seqNo = seqNoService.generateSeqNo(); + } else { + seqNo = delete.seqNo(); + } + updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); + found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue); + deleteResult = new DeleteResult(updatedVersion, seqNo, found); + location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY + ? translog.add(new Translog.Delete(delete, deleteResult)) + : null; + versionMap.putUnderLock(delete.uid().bytes(), + new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); + deleteResult.setTranslogLocation(location); + } + deleteResult.setTook(System.nanoTime() - delete.startTime()); + deleteResult.freeze(); + return deleteResult; } finally { - if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { - seqNoService.markSeqNoAsCompleted(delete.seqNo()); + if (deleteResult != null && deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + seqNoService.markSeqNoAsCompleted(deleteResult.getSeqNo()); } } } - private boolean deleteIfFound(Delete delete, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException { + private boolean deleteIfFound(Term uid, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException { final boolean found; if (currentVersion == Versions.NOT_FOUND) { // doc does not exist and no prior deletes @@ -684,7 +752,7 @@ public class InternalEngine extends Engine { found = false; } else { // we deleted a currently existing document - indexWriter.deleteDocuments(delete.uid()); + indexWriter.deleteDocuments(uid); found = true; } return found; @@ -1152,7 +1220,8 @@ public class InternalEngine extends Engine { } } - private IndexWriter createWriter(boolean create) throws IOException { + // pkg-private for testing + IndexWriter createWriter(boolean create) throws IOException { try { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close diff --git a/core/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java index 
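The refactored deleteIfFound above now takes just the uid and version-map state instead of the whole Delete operation, which makes its three-way decision easy to show in isolation. A hedged sketch, with -1 standing in for Versions.NOT_FOUND, a single boolean collapsing the versionValue tombstone check, and a Runnable in place of the IndexWriter call:

```java
// Three cases for a delete-by-id, mirroring deleteIfFound above:
// unknown doc, already-tombstoned doc, and a live doc to remove.
final class DeleteDecision {
    static final long NOT_FOUND = -1L; // stand-in for Versions.NOT_FOUND

    static boolean deleteIfFound(long currentVersion, boolean tombstoned, Runnable deleteDocs) {
        if (currentVersion == NOT_FOUND) {
            return false;     // doc never existed: nothing to delete
        } else if (tombstoned) {
            return false;     // a prior delete already removed it
        } else {
            deleteDocs.run(); // live doc: the real code calls indexWriter.deleteDocuments(uid)
            return true;
        }
    }

    public static void main(String[] args) {
        System.out.println(deleteIfFound(NOT_FOUND, false, () -> {})); // false
        System.out.println(deleteIfFound(3L, true,  () -> {}));        // false
        System.out.println(deleteIfFound(3L, false, () -> {}));        // true
    }
}
```

The returned flag feeds straight into the DeleteResult: a delete that hits a tombstone still succeeds version-wise, it just reports found == false.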
d55ed1d4189..a203962944b 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/RecoveryEngineException.java @@ -25,9 +25,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class RecoveryEngineException extends EngineException { private final int phase; diff --git a/core/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java index 51e2b257518..e1227d075d3 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/RefreshFailedEngineException.java @@ -24,9 +24,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class RefreshFailedEngineException extends EngineException { public RefreshFailedEngineException(ShardId shardId, Throwable t) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index b9d7cb50764..248b9f95c25 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -107,12 +107,12 @@ public class ShadowEngine extends Engine { @Override - public void index(Index index) throws EngineException { + public IndexResult index(Index index) { throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine"); } @Override - public void delete(Delete delete) throws EngineException { + public DeleteResult delete(Delete delete) { throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine"); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java index c0296cf9db3..f669139c07e 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java @@ -24,9 +24,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class SnapshotFailedEngineException extends EngineException { public SnapshotFailedEngineException(ShardId shardId, Throwable cause) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 9b038c6e77c..b743141d0c7 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -24,9 +24,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class VersionConflictEngineException extends EngineException { public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java index 662c88df5d9..5258b270091 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -57,4 
+57,11 @@ class VersionValue implements Accountable { public Collection getChildResources() { return Collections.emptyList(); } + + @Override + public String toString() { + return "VersionValue{" + + "version=" + version + + '}'; + } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java b/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java index d8548f72476..56fe03d4395 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java @@ -30,8 +30,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class FieldDataStats implements Streamable, ToXContent { long memorySize; diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java index abfe0d8e96a..d6573b930fa 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.fielddata; -/** - */ public interface IndexNumericFieldData extends IndexFieldData { public static enum NumericType { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 3991a37a8bf..403a1290546 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -291,4 +291,38 @@ public interface ScriptDocValues extends List { return geohashDistance(geohash); } } + + final class Booleans extends AbstractList implements ScriptDocValues { + + private final SortedNumericDocValues values; + + public Booleans(SortedNumericDocValues values) { + this.values = values; + } + + @Override + public void setNextDocId(int docId) { + values.setDocument(docId); + } + + @Override + public List getValues() { + return this; + } + + public boolean getValue() { + return values.count() != 0 && values.valueAt(0) == 1; + } + + @Override + public Boolean get(int index) { + return values.valueAt(index) == 1; + } + + @Override + public int size() { + return values.count(); + } + + } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java index 9e21562e8c7..d8eaaaf448e 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java @@ -29,8 +29,6 @@ import org.elasticsearch.index.shard.ShardId; import java.util.Map; import java.util.concurrent.ConcurrentMap; -/** - */ public class ShardFieldData implements IndexFieldDataCache.Listener { final CounterMetric evictionsMetric = new CounterMetric(); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java index aa09bac4dcf..cfc0a0f3313 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java @@ -31,8 +31,6 @@ import org.apache.lucene.util.packed.PackedInts; import java.util.Collection; 
import java.util.Collections; -/** - */ public class SinglePackedOrdinals extends Ordinals { // ordinals with value 0 indicates no value diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicGeoPointFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicGeoPointFieldData.java index 175f041bd66..9660d9f8684 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicGeoPointFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicGeoPointFieldData.java @@ -28,8 +28,6 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import java.util.Collection; import java.util.Collections; -/** - */ public abstract class AbstractAtomicGeoPointFieldData implements AtomicGeoPointFieldData { @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicOrdinalsFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicOrdinalsFieldData.java index 51de4c1be53..52688f6903e 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicOrdinalsFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicOrdinalsFieldData.java @@ -31,8 +31,6 @@ import java.util.Collection; import java.util.Collections; -/** - */ public abstract class AbstractAtomicOrdinalsFieldData implements AtomicOrdinalsFieldData { @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicParentChildFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicParentChildFieldData.java index 1a801d75411..7c03e1a7942 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicParentChildFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractAtomicParentChildFieldData.java @@ -35,8 +35,6 @@ import java.util.Set; import static java.util.Collections.emptySet; -/** - */ abstract class AbstractAtomicParentChildFieldData implements AtomicParentChildFieldData { @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java index b3b0604e9e2..c52ccb90bed 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java @@ -19,28 +19,24 @@ package org.elasticsearch.index.fielddata.plain; -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.util.Accountable; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import java.util.Collection; -import java.util.Collections; - - /** * Specialization of {@link AtomicNumericFieldData} for integers. */ abstract class AtomicLongFieldData implements AtomicNumericFieldData { private final long ramBytesUsed; + /** True if this numeric data is for a boolean field, and so only has values 0 and 1. 
*/ + private final boolean isBoolean; - AtomicLongFieldData(long ramBytesUsed) { + AtomicLongFieldData(long ramBytesUsed, boolean isBoolean) { this.ramBytesUsed = ramBytesUsed; + this.isBoolean = isBoolean; } @Override @@ -50,7 +46,11 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData { @Override public final ScriptDocValues<?> getScriptValues() { - return new ScriptDocValues.Longs(getLongValues()); + if (isBoolean) { + return new ScriptDocValues.Booleans(getLongValues()); + } else { + return new ScriptDocValues.Longs(getLongValues()); + } } @Override @@ -63,24 +63,6 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData { return FieldData.castToDouble(getLongValues()); } - public static AtomicNumericFieldData empty(final int maxDoc) { - return new AtomicLongFieldData(0) { - - @Override - public SortedNumericDocValues getLongValues() { - return DocValues.emptySortedNumeric(maxDoc); - } - - @Override - public Collection<Accountable> getChildResources() { - return Collections.emptyList(); - } - - }; - } - @Override - public void close() { - } - + public void close() {} }
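
The new isBoolean constructor argument above is how the field type tells the leaf field data which script-facing view to hand out; the underlying long storage is unchanged. A hypothetical, condensed version of that dispatch (LongLeafFieldData and its methods are illustrative names, not the real classes):

```java
import java.util.ArrayList;
import java.util.List;

// Condensed sketch of the dispatch above: leaf field data for longs is told
// at construction time whether it backs a boolean field, and exposes the
// matching view over the same underlying longs.
abstract class LongLeafFieldData {
    private final boolean isBoolean;

    LongLeafFieldData(boolean isBoolean) {
        this.isBoolean = isBoolean;
    }

    /** Raw sorted numeric values for the current document. */
    abstract long[] longValues();

    /** Mirrors getScriptValues() above: only the view type differs. */
    final List<?> scriptValues() {
        List<Object> values = new ArrayList<>();
        for (long v : longValues()) {
            values.add(isBoolean ? (Object) (v == 1) : (Object) v);
        }
        return values;
    }

    public static void main(String[] args) {
        LongLeafFieldData data = new LongLeafFieldData(true) {
            @Override
            long[] longValues() { return new long[] {1, 0}; }
        };
        System.out.println(data.scriptValues()); // [true, false]
    }
}
```

diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayAtomicFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayAtomicFieldData.java index b1a97a878ee..0627e341a04 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayAtomicFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayAtomicFieldData.java @@ -36,9 +36,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -/** - * - */ public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPointFieldData { @Override public void close() { } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java index d484c503c2b..18313f32745 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java @@ -68,7 +68,7 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData estimator.afterLoad(null, data.ramBytesUsed()); return data; } - return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ? + return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)) ?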
loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayLegacyAtomicFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayLegacyAtomicFieldData.java index 8d0953fb6e6..15f610e1ab7 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayLegacyAtomicFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayLegacyAtomicFieldData.java @@ -36,8 +36,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -/** - */ public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicGeoPointFieldData { @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java index 93a981cab2d..e46f04060b9 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesAtomicFieldData.java @@ -31,8 +31,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -/** - */ public class PagedBytesAtomicFieldData extends AbstractAtomicOrdinalsFieldData { private final PagedBytes.Reader bytes; diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index 5695f7ef15a..3b01f01eafb 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -46,8 +46,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.io.IOException; -/** - */ public class PagedBytesIndexFieldData extends AbstractIndexOrdinalsFieldData { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java index be877b9c68a..cf1fccabee0 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java @@ -96,7 +96,7 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple case DOUBLE: return new SortedNumericDoubleFieldData(reader, field); default: - return new SortedNumericLongFieldData(reader, field); + return new SortedNumericLongFieldData(reader, field, numericType == NumericType.BOOLEAN); } } @@ -117,8 +117,8 @@ public class SortedNumericDVIndexFieldData extends DocValuesIndexFieldData imple final LeafReader reader; final String field; - SortedNumericLongFieldData(LeafReader reader, String field) { - super(0L); + SortedNumericLongFieldData(LeafReader reader, String field, boolean isBoolean) { + super(0L, isBoolean); this.reader = reader; this.field = field; } diff --git a/core/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/core/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index ee9634f690c..1316183f862 100644 --- a/core/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ 
b/core/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -90,7 +90,7 @@ public class FieldsVisitor extends StoredFieldVisitor { } List<Object> fieldValues = entry.getValue(); for (int i = 0; i < fieldValues.size(); i++) { - fieldValues.set(i, fieldType.valueForSearch(fieldValues.get(i))); + fieldValues.set(i, fieldType.valueForDisplay(fieldValues.get(i))); } } } diff --git a/core/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java b/core/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java index 2a6c362274a..661d729b55f 100644 --- a/core/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java +++ b/core/src/main/java/org/elasticsearch/index/fieldvisitor/JustUidFieldsVisitor.java @@ -23,8 +23,6 @@ import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; -/** - */ public class JustUidFieldsVisitor extends FieldsVisitor { public JustUidFieldsVisitor() { diff --git a/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java b/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java index 2503286f710..556f43d06a1 100644 --- a/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java +++ b/core/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java @@ -27,8 +27,6 @@ import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; import java.util.List; -/** - */ public class SingleFieldsVisitor extends FieldsVisitor { private String field; @@ -79,7 +77,7 @@ public class SingleFieldsVisitor extends FieldsVisitor { return; } for (int i = 0; i < fieldValues.size(); i++) { - fieldValues.set(i, fieldType.valueForSearch(fieldValues.get(i))); + fieldValues.set(i, fieldType.valueForDisplay(fieldValues.get(i))); } } } diff --git a/core/src/main/java/org/elasticsearch/index/get/GetField.java b/core/src/main/java/org/elasticsearch/index/get/GetField.java index 0ebbf1f9ac1..be3b8d6a257 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetField.java +++ b/core/src/main/java/org/elasticsearch/index/get/GetField.java @@ -29,9 +29,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -/** - * - */ public class GetField implements Streamable, Iterable<Object> { private String name; diff --git a/core/src/main/java/org/elasticsearch/index/get/GetResult.java b/core/src/main/java/org/elasticsearch/index/get/GetResult.java index b688ed44234..0f02885a251 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/core/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -42,8 +42,6 @@ import java.util.Map; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.get.GetField.readGetField; -/** - */ public class GetResult implements Streamable, Iterable<GetField>, ToXContent { private String index; diff --git a/core/src/main/java/org/elasticsearch/index/get/GetStats.java b/core/src/main/java/org/elasticsearch/index/get/GetStats.java index 10b4f64c19e..ed7057d33f0 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetStats.java +++ b/core/src/main/java/org/elasticsearch/index/get/GetStats.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class GetStats implements Streamable, ToXContent { private long existsCount;
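
The valueForSearch-to-valueForDisplay rename above clarifies what these call sites actually do: raw stored values are converted into their external, response-friendly representation before being returned, not used for searching. A sketch of that post-processing step, modelling valueForDisplay as a plain Function (in the real code it is a method on MappedFieldType):

```java
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

// Sketch of the conversion the visitors above apply: stored values are
// mapped through the field type's display conversion before being returned.
final class DisplayValues {
    static void convertInPlace(List<Object> fieldValues, Function<Object, Object> valueForDisplay) {
        for (int i = 0; i < fieldValues.size(); i++) {
            fieldValues.set(i, valueForDisplay.apply(fieldValues.get(i)));
        }
    }

    public static void main(String[] args) {
        // e.g. a legacy boolean field stores "T"/"F" but should display true/false
        List<Object> stored = Arrays.asList((Object) "T", "F", "T");
        convertInPlace(stored, v -> "T".equals(v));
        System.out.println(stored); // [true, false, true]
    }
}
```

diff --git a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java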
b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java index c49fb2c3cc4..cfab3382c18 100644 --- a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -56,8 +56,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; -/** - */ public final class ShardGetService extends AbstractIndexShardComponent { private final MapperService mapperService; private final MeanMetric existsMetric = new MeanMetric(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java index 6c1477d8d07..90d9fbda1b5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java @@ -42,9 +42,6 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenien import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue; import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; -/** - * - */ public class AllFieldMapper extends MetadataFieldMapper { public static final String NAME = "_all"; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index cb6fae8b59d..374540d03fe 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -43,9 +43,6 @@ import java.util.Map; import static org.elasticsearch.index.mapper.TypeParsers.parseField; -/** - * - */ public class BinaryFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "binary"; @@ -104,7 +101,7 @@ public class BinaryFieldMapper extends FieldMapper { @Override - public BytesReference valueForSearch(Object value) { + public BytesReference valueForDisplay(Object value) { if (value == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index b27f564f2d7..3a4ce5bd123 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.joda.time.DateTimeZone; @@ -164,7 +165,7 @@ public class BooleanFieldMapper extends FieldMapper { } @Override - public Boolean valueForSearch(Object value) { + public Boolean valueForDisplay(Object value) { if (value == null) { return null; } @@ -197,7 +198,7 @@ public class BooleanFieldMapper extends FieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexed(); return new TermRangeQuery(name(), lowerTerm == null ? 
null : indexedValueForSearch(lowerTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 09035bfa3ce..5a4edd39ac1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.NumberType; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.search.suggest.completion.CompletionSuggester; import org.elasticsearch.search.suggest.completion.context.ContextMapping; @@ -209,7 +210,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp public NamedAnalyzer indexAnalyzer() { final NamedAnalyzer indexAnalyzer = super.indexAnalyzer(); if (indexAnalyzer != null && !(indexAnalyzer.analyzer() instanceof CompletionAnalyzer)) { - return new NamedAnalyzer(indexAnalyzer.name(), + return new NamedAnalyzer(indexAnalyzer.name(), AnalyzerScope.INDEX, new CompletionAnalyzer(indexAnalyzer, preserveSep, preservePositionIncrements)); } @@ -220,7 +221,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp public NamedAnalyzer searchAnalyzer() { final NamedAnalyzer searchAnalyzer = super.searchAnalyzer(); if (searchAnalyzer != null && !(searchAnalyzer.analyzer() instanceof CompletionAnalyzer)) { - return new NamedAnalyzer(searchAnalyzer.name(), + return new NamedAnalyzer(searchAnalyzer.name(), AnalyzerScope.INDEX, new CompletionAnalyzer(searchAnalyzer, preserveSep, preservePositionIncrements)); } return searchAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java index 590ca0f8615..73a797954af 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper2x.java @@ -55,9 +55,6 @@ import java.util.TreeMap; import static org.elasticsearch.index.mapper.TypeParsers.parseMultiField; -/** - * - */ public class CompletionFieldMapper2x extends FieldMapper { public static final String CONTENT_TYPE = "completion"; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 717f0361552..1915efcb214 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -43,9 +43,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.LegacyNumberFieldMapper.Defaults; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTimeZone; import java.io.IOException; @@ -54,8 +54,6 @@ import java.util.List; import 
java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.concurrent.Callable; - import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; /** A {@link FieldMapper} for ip addresses. */ @@ -163,69 +161,6 @@ public class DateFieldMapper extends FieldMapper { } public static final class DateFieldType extends MappedFieldType { - - final class LateParsingQuery extends Query { - - final Object lowerTerm; - final Object upperTerm; - final boolean includeLower; - final boolean includeUpper; - final DateTimeZone timeZone; - final DateMathParser forcedDateParser; - - public LateParsingQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser forcedDateParser) { - this.lowerTerm = lowerTerm; - this.upperTerm = upperTerm; - this.includeLower = includeLower; - this.includeUpper = includeUpper; - this.timeZone = timeZone; - this.forcedDateParser = forcedDateParser; - } - - @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); - if (rewritten != this) { - return rewritten; - } - return innerRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser); - } - - // Even though we only cache rewritten queries it is good to let all queries implement hashCode() and equals(): - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (sameClassAs(o) == false) return false; - - LateParsingQuery that = (LateParsingQuery) o; - if (includeLower != that.includeLower) return false; - if (includeUpper != that.includeUpper) return false; - if (lowerTerm != null ? !lowerTerm.equals(that.lowerTerm) : that.lowerTerm != null) return false; - if (upperTerm != null ? !upperTerm.equals(that.upperTerm) : that.upperTerm != null) return false; - if (timeZone != null ? !timeZone.equals(that.timeZone) : that.timeZone != null) return false; - - return true; - } - - @Override - public int hashCode() { - return Objects.hash(classHash(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone); - } - - @Override - public String toString(String s) { - final StringBuilder sb = new StringBuilder(); - return sb.append(name()).append(':') - .append(includeLower ? '[' : '{') - .append((lowerTerm == null) ? "*" : lowerTerm.toString()) - .append(" TO ") - .append((upperTerm == null) ? "*" : upperTerm.toString()) - .append(includeUpper ? 
']' : '}') - .toString(); - } - } - protected FormatDateTimeFormatter dateTimeFormatter; protected DateMathParser dateMathParser; @@ -301,7 +236,7 @@ public class DateFieldMapper extends FieldMapper { @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - Query query = innerRangeQuery(value, value, true, true, null, null); + Query query = innerRangeQuery(value, value, true, true, null, null, context); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -309,19 +244,19 @@ public class DateFieldMapper extends FieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexed(); - return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null); + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null, context); } public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) { + @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { failIfNotIndexed(); - return new LateParsingQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser); + return innerRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser, context); } Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) { + @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { failIfNotIndexed(); DateMathParser parser = forcedDateParser == null ? dateMathParser @@ -330,7 +265,7 @@ public class DateFieldMapper extends FieldMapper { if (lowerTerm == null) { l = Long.MIN_VALUE; } else { - l = parseToMilliseconds(lowerTerm, !includeLower, timeZone, parser); + l = parseToMilliseconds(lowerTerm, !includeLower, timeZone, parser, context); if (includeLower == false) { ++l; } @@ -338,7 +273,7 @@ public class DateFieldMapper extends FieldMapper { if (upperTerm == null) { u = Long.MAX_VALUE; } else { - u = parseToMilliseconds(upperTerm, includeUpper, timeZone, parser); + u = parseToMilliseconds(upperTerm, includeUpper, timeZone, parser, context); if (includeUpper == false) { --u; } @@ -347,7 +282,7 @@ public class DateFieldMapper extends FieldMapper { } public long parseToMilliseconds(Object value, boolean roundUp, - @Nullable DateTimeZone zone, @Nullable DateMathParser forcedDateParser) { + @Nullable DateTimeZone zone, @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { DateMathParser dateParser = dateMathParser(); if (forcedDateParser != null) { dateParser = forcedDateParser; @@ -359,16 +294,7 @@ public class DateFieldMapper extends FieldMapper { } else { strValue = value.toString(); } - return dateParser.parse(strValue, now(), roundUp, zone); - } - - private static Callable now() { - return () -> { - final SearchContext context = SearchContext.current(); - return context != null - ? 
context.nowInMillis() - : System.currentTimeMillis(); - }; + return dateParser.parse(strValue, context::nowInMillis, roundUp, zone); } @Override @@ -390,7 +316,7 @@ public class DateFieldMapper extends FieldMapper { public Relation isFieldWithinQuery(IndexReader reader, Object from, Object to, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateParser) throws IOException { + DateTimeZone timeZone, DateMathParser dateParser, QueryRewriteContext context) throws IOException { if (dateParser == null) { dateParser = this.dateMathParser; } @@ -405,7 +331,7 @@ long fromInclusive = Long.MIN_VALUE; if (from != null) { - fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser); + fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser, context); if (includeLower == false) { if (fromInclusive == Long.MAX_VALUE) { return Relation.DISJOINT; @@ -416,7 +342,7 @@ long toInclusive = Long.MAX_VALUE; if (to != null) { - toInclusive = parseToMilliseconds(to, includeUpper, timeZone, dateParser); + toInclusive = parseToMilliseconds(to, includeUpper, timeZone, dateParser, context); if (includeUpper == false) { if (toInclusive == Long.MIN_VALUE) { return Relation.DISJOINT; @@ -441,7 +367,7 @@ } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { Long val = (Long) value; if (val == null) { return null;
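
The DateFieldMapper changes above remove the lazily-rewritten LateParsingQuery and the SearchContext.current() thread-local: "now" is instead taken from the QueryShardContext/QueryRewriteContext that is threaded through rangeQuery, parseToMilliseconds and isFieldWithinQuery, so date math resolves against a single, deterministic timestamp. A sketch of the idea with an illustrative parser (not the real DateMathParser API):

```java
import java.time.Instant;
import java.util.function.LongSupplier;

// Sketch of the change above: date math that needs "now" takes it from an
// explicitly passed supplier (the rewrite context) instead of reaching into
// a thread-local SearchContext. Names and grammar here are illustrative.
final class DateMathDemo {
    /** Resolve an expression like "now-1000" against the supplied clock. */
    static long resolve(String expression, LongSupplier nowInMillis) {
        long now = nowInMillis.getAsLong();
        if (expression.equals("now")) {
            return now;
        } else if (expression.startsWith("now-")) {
            return now - Long.parseLong(expression.substring("now-".length()));
        }
        return Instant.parse(expression).toEpochMilli();
    }

    public static void main(String[] args) {
        // The same query rewritten on different shards sees the same "now",
        // because the context fixes the timestamp once.
        LongSupplier fixedNow = () -> 1_000_000L;
        System.out.println(resolve("now-1000", fixedNow)); // 999000
    }
}
```

diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java index 57f2ff40530..12f968bb38b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java @@ -31,9 +31,6 @@ import java.util.Iterator; import java.util.Map; import java.util.Set; -/** - * - */ public final class DocumentFieldMappers implements Iterable<FieldMapper> { /** Full field name to mapper */ diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index eb6d6a9a3e8..b23c189e5bd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -47,9 +47,6 @@ import java.util.Objects; import static java.util.Collections.emptyMap; -/** - * - */ public class DocumentMapper implements ToXContent { public static class Builder { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index 2cdeed9f040..50c7d98be92 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -69,7 +69,7 @@ public class DocumentMapperParser { } public Mapper.TypeParser.ParserContext parserContext(String type) { - return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get()); + return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated,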
parseFieldMatcher, queryShardContextSupplier); } public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/core/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index 08620ed8c45..197d06cd099 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -34,9 +34,6 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; -/** - * - */ public class DynamicTemplate implements ToXContent { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(DynamicTemplate.class)); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 4773d5da468..9128002eb5a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -246,7 +246,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { super(simpleName); assert indexSettings != null; this.indexCreatedVersion = Version.indexCreated(indexSettings); - if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_alpha6)) { + if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_beta1)) { if (simpleName.isEmpty()) { throw new IllegalArgumentException("name cannot be empty string"); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index e1615add19e..e4a1f5ec5eb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -38,9 +38,6 @@ import java.util.List; import java.util.Map; -/** - * - */ public class IndexFieldMapper extends MetadataFieldMapper { public static final String NAME = "_index"; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 69a8e06f859..90740b794a8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -178,7 +178,7 @@ public class IpFieldMapper extends FieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexed(); InetAddress lower; if (lowerTerm == null) { @@ -231,7 +231,7 @@ public class IpFieldMapper extends FieldMapper { } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { if (value == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 204e61aabe6..1ba913790a4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -134,7 +134,7 @@ public final class KeywordFieldMapper extends FieldMapper { } node.put("index", index); } - + return new StringFieldMapper.TypeParser().parse(name, 
node, parserContext); } KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name); @@ -196,7 +196,7 @@ public final class KeywordFieldMapper extends FieldMapper { } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { if (value == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java index 9ec5970237a..647dd315a20 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LatLonPointFieldMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import java.io.IOException; -import java.util.Iterator; import java.util.Map; /** @@ -44,7 +43,7 @@ import java.util.Map; */ public class LatLonPointFieldMapper extends BaseGeoPointFieldMapper { public static final String CONTENT_TYPE = "geo_point"; - public static final Version LAT_LON_FIELD_VERSION = Version.V_5_0_0_alpha6; + public static final Version LAT_LON_FIELD_VERSION = Version.V_5_0_0_beta1; public static class Defaults extends BaseGeoPointFieldMapper.Defaults { public static final LatLonPointFieldType FIELD_TYPE = new LatLonPointFieldType(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java index 2c63806ebbe..96bde70ae82 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyByteFieldMapper.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Iterator; @@ -47,9 +48,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeByteValue; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper { public static final String CONTENT_TYPE = "byte"; @@ -131,7 +129,7 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper { } @Override - public Byte valueForSearch(Object value) { + public Byte valueForDisplay(Object value) { if (value == null) { return null; } @@ -146,7 +144,7 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), lowerTerm == null ? null : (int)parseValue(lowerTerm), upperTerm == null ? 
null : (int)parseValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java index 29689d06dff..328d16e1e95 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDateFieldMapper.java @@ -43,8 +43,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.LegacyLongFieldMapper.CustomLongNumericField; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTimeZone; import java.io.IOException; @@ -53,7 +54,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; @@ -176,67 +176,6 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { public static class DateFieldType extends NumberFieldType { - final class LateParsingQuery extends Query { - - final Object lowerTerm; - final Object upperTerm; - final boolean includeLower; - final boolean includeUpper; - final DateTimeZone timeZone; - final DateMathParser forcedDateParser; - - public LateParsingQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, DateTimeZone timeZone, DateMathParser forcedDateParser) { - this.lowerTerm = lowerTerm; - this.upperTerm = upperTerm; - this.includeLower = includeLower; - this.includeUpper = includeUpper; - this.timeZone = timeZone; - this.forcedDateParser = forcedDateParser; - } - - @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); - if (rewritten != this) { - return rewritten; - } - return innerRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser); - } - - // Even though we only cache rewritten queries it is good to let all queries implement hashCode() and equals(): - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (sameClassAs(o) == false) return false; - - LateParsingQuery that = (LateParsingQuery) o; - if (includeLower != that.includeLower) return false; - if (includeUpper != that.includeUpper) return false; - if (lowerTerm != null ? !lowerTerm.equals(that.lowerTerm) : that.lowerTerm != null) return false; - if (upperTerm != null ? !upperTerm.equals(that.upperTerm) : that.upperTerm != null) return false; - if (timeZone != null ? !timeZone.equals(that.timeZone) : that.timeZone != null) return false; - - return true; - } - - @Override - public int hashCode() { - return Objects.hash(classHash(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone); - } - - @Override - public String toString(String s) { - final StringBuilder sb = new StringBuilder(); - return sb.append(name()).append(':') - .append(includeLower ? '[' : '{') - .append((lowerTerm == null) ? "*" : lowerTerm.toString()) - .append(" TO ") - .append((upperTerm == null) ? "*" : upperTerm.toString()) - .append(includeUpper ? 
']' : '}') - .toString(); - } - } - protected FormatDateTimeFormatter dateTimeFormatter = Defaults.DATE_TIME_FORMATTER; protected TimeUnit timeUnit = Defaults.TIME_UNIT; protected DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter); @@ -339,7 +278,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { Long val = (Long) value; if (val == null) { return null; @@ -348,8 +287,8 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null); + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null, context); } @Override @@ -366,14 +305,20 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { dateTimeFormatter(), minValue, maxValue); } - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) { - return new LateParsingQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser); + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, + @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { + return innerRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser, context); } - private Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) { + private Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, + @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), - lowerTerm == null ? null : parseToMilliseconds(lowerTerm, !includeLower, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser), - upperTerm == null ? null : parseToMilliseconds(upperTerm, includeUpper, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser), + lowerTerm == null ? null + : parseToMilliseconds(lowerTerm, !includeLower, timeZone, + forcedDateParser == null ? dateMathParser : forcedDateParser, context), + upperTerm == null ? null + : parseToMilliseconds(upperTerm, includeUpper, timeZone, + forcedDateParser == null ? 
dateMathParser : forcedDateParser, context), includeLower, includeUpper); } @@ -381,7 +326,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { public Relation isFieldWithinQuery(IndexReader reader, Object from, Object to, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateParser) throws IOException { + DateTimeZone timeZone, DateMathParser dateParser, QueryRewriteContext context) throws IOException { if (dateParser == null) { dateParser = this.dateMathParser; } @@ -397,7 +342,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { long fromInclusive = Long.MIN_VALUE; if (from != null) { - fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser); + fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser, context); if (includeLower == false) { if (fromInclusive == Long.MAX_VALUE) { return Relation.DISJOINT; @@ -408,7 +353,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { long toInclusive = Long.MAX_VALUE; if (to != null) { - toInclusive = parseToMilliseconds(to, includeUpper, timeZone, dateParser); + toInclusive = parseToMilliseconds(to, includeUpper, timeZone, dateParser, context); if (includeUpper == false) { if (toInclusive == Long.MIN_VALUE) { return Relation.DISJOINT; @@ -426,7 +371,8 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { } } - public long parseToMilliseconds(Object value, boolean inclusive, @Nullable DateTimeZone zone, @Nullable DateMathParser forcedDateParser) { + public long parseToMilliseconds(Object value, boolean inclusive, @Nullable DateTimeZone zone, + @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { if (value instanceof Long) { return ((Long) value).longValue(); } @@ -442,7 +388,7 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { } else { strValue = value.toString(); } - return dateParser.parse(strValue, now(), inclusive, zone); + return dateParser.parse(strValue, context::nowInMillis, inclusive, zone); } @Override @@ -474,18 +420,6 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper { return (DateFieldType) super.fieldType(); } - private static Callable now() { - return new Callable() { - @Override - public Long call() { - final SearchContext context = SearchContext.current(); - return context != null - ? 
context.nowInMillis() - : System.currentTimeMillis(); - } - }; - } - @Override protected boolean customBoost() { return true; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java index 07e459e8ea9..c0a6dd8bacf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyDoubleFieldMapper.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Iterator; @@ -50,9 +51,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper { public static final String CONTENT_TYPE = "double"; @@ -135,7 +133,7 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper { } @Override - public java.lang.Double valueForSearch(Object value) { + public java.lang.Double valueForDisplay(Object value) { if (value == null) { return null; } @@ -157,7 +155,7 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseDoubleValue(lowerTerm), upperTerm == null ? 
null : parseDoubleValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java index 3fbc639ea67..43307373cb3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyFloatFieldMapper.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Iterator; @@ -49,9 +50,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper { public static final String CONTENT_TYPE = "float"; @@ -142,7 +140,7 @@ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java index 99ca07b06bf..fc46a08ce1a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyGeoPointFieldMapper.java @@ -297,7 +297,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement validPoint = true; } - if (coerce.value() == true && validPoint == false) { + if (coerce.value() && validPoint == false) { // by setting coerce to false we are assuming all geopoints are already in a valid coordinate system // thus this extra step can be skipped GeoUtils.normalizePoint(point, true, true); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java index 65b9b65eaf9..b2016c75552 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIntegerFieldMapper.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Iterator; @@ -48,9 +49,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper { public static final 
String CONTENT_TYPE = "integer"; @@ -145,7 +143,7 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java index 699124a4c05..37c37cc1b80 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyIpFieldMapper.java @@ -55,9 +55,6 @@ import java.util.regex.Pattern; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper { public static final String CONTENT_TYPE = "ip"; @@ -171,7 +168,7 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper { * IPs should return as a string. */ @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { Long val = (Long) value; if (val == null) { return null; @@ -210,14 +207,14 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper { } if (fromTo != null) { return rangeQuery(fromTo[0] == 0 ? null : fromTo[0], - fromTo[1] == MAX_IP ? null : fromTo[1], true, false); + fromTo[1] == MAX_IP ? null : fromTo[1], true, false, context); } } return super.termQuery(value, context); } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? 
null : parseValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java index 4661d1cd365..110259421c9 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyLongFieldMapper.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Iterator; @@ -48,9 +49,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeLongValue; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper { public static final String CONTENT_TYPE = "long"; @@ -146,7 +144,7 @@ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseLongValue(lowerTerm), upperTerm == null ? null : parseLongValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java index f377883aa24..20f248fdb5a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapper.java @@ -37,9 +37,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.joda.time.DateTimeZone; -/** - * - */ public abstract class LegacyNumberFieldMapper extends FieldMapper { // this is private since it has a different default private static final Setting COERCE_SETTING = diff --git a/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java index b42ec620aea..c15f149eb66 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/LegacyShortFieldMapper.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Iterator; @@ -48,9 +49,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeShortValue; import static org.elasticsearch.index.mapper.TypeParsers.parseNumberField; -/** - * - */ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper { public static final String CONTENT_TYPE = "short"; @@ -135,7 +133,7 @@ public class LegacyShortFieldMapper extends 
LegacyNumberFieldMapper { } @Override - public Short valueForSearch(Object value) { + public Short valueForDisplay(Object value) { if (value == null) { return null; } @@ -150,7 +148,7 @@ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), lowerTerm == null ? null : (int)parseValue(lowerTerm), upperTerm == null ? null : (int)parseValue(upperTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 8796f8539d9..5b80986b142 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -303,7 +304,7 @@ public abstract class MappedFieldType extends FieldType { /** Given a value that comes from the stored fields API, convert it to the * expected type. For instance a date field would store dates as longs and * format it back to a string in this method. */ - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { return value; } @@ -343,7 +344,7 @@ public abstract class MappedFieldType extends FieldType { return new ConstantScoreQuery(builder.build()); } - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support range queries"); } @@ -399,10 +400,10 @@ public abstract class MappedFieldType extends FieldType { * {@link Relation#INTERSECTS}, which is always fine to return when there is * no way to check whether values are actually within bounds. 
*/ public Relation isFieldWithinQuery( - IndexReader reader, - Object from, Object to, - boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + IndexReader reader, + Object from, Object to, + boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException { return Relation.INTERSECTS; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 06928566424..83a20e03ffe 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.Version; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -31,6 +30,7 @@ import org.elasticsearch.index.similarity.SimilarityProvider; import java.util.Map; import java.util.Objects; import java.util.function.Function; +import java.util.function.Supplier; public abstract class Mapper implements ToXContent, Iterable { @@ -39,6 +39,7 @@ public abstract class Mapper implements ToXContent, Iterable { private final ContentPath contentPath; public BuilderContext(Settings indexSettings, ContentPath contentPath) { + Objects.requireNonNull(indexSettings, "indexSettings is required"); this.contentPath = contentPath; this.indexSettings = indexSettings; } @@ -47,16 +48,11 @@ public abstract class Mapper implements ToXContent, Iterable { return this.contentPath; } - @Nullable public Settings indexSettings() { return this.indexSettings; } - @Nullable public Version indexCreatedVersion() { - if (indexSettings == null) { - return null; - } return Version.indexCreated(indexSettings); } } @@ -97,11 +93,13 @@ public abstract class Mapper implements ToXContent, Iterable { private final ParseFieldMatcher parseFieldMatcher; - private final QueryShardContext queryShardContext; + private final Supplier queryShardContextSupplier; + private QueryShardContext queryShardContext; public ParserContext(String type, IndexAnalyzers indexAnalyzers, Function similarityLookupService, MapperService mapperService, Function typeParsers, - Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) { + Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, + Supplier queryShardContextSupplier) { this.type = type; this.indexAnalyzers = indexAnalyzers; this.similarityLookupService = similarityLookupService; @@ -109,7 +107,7 @@ public abstract class Mapper implements ToXContent, Iterable { this.typeParsers = typeParsers; this.indexVersionCreated = indexVersionCreated; this.parseFieldMatcher = parseFieldMatcher; - this.queryShardContext = queryShardContext; + this.queryShardContextSupplier = queryShardContextSupplier; } public String type() { @@ -141,6 +139,10 @@ public abstract class Mapper implements ToXContent, Iterable { } public QueryShardContext queryShardContext() { + // No need for synchronization, this class must be used in a single thread + if (queryShardContext == null) { + queryShardContext = queryShardContextSupplier.get(); + } return queryShardContext; } @@ -159,7 +161,7 @@ public abstract class Mapper implements ToXContent, Iterable { static class MultiFieldParserContext extends ParserContext { 
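// the constructor below hands the parent's context over as a method reference (in::queryShardContext), so the QueryShardContext is created lazily on first use (see queryShardContext() above) and shared between the multi-field parser context and its parent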
MultiFieldParserContext(ParserContext in) { - super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext()); + super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in::queryShardContext); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperException.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperException.java index 0241f1c8e45..0a381de9f01 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperException.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class MapperException extends ElasticsearchException { public MapperException(StreamInput in) throws IOException { super(in); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java index df886c5ce9d..9769c58f4ec 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperParsingException.java @@ -24,9 +24,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class MapperParsingException extends MapperException { public MapperParsingException(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 2da082cba75..f2a958f6fcf 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -62,9 +62,6 @@ import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; -/** - * - */ public class MapperService extends AbstractIndexComponent { /** @@ -114,6 +111,7 @@ public class MapperService extends AbstractIndexComponent { private volatile FieldTypeLookup fieldTypes; private volatile Map fullPathObjectMappers = new HashMap<>(); private boolean hasNested = false; // updated dynamically to true when a nested object is added + private boolean allEnabled = false; // updated dynamically to true when _all is enabled private final DocumentMapperParser documentParser; @@ -153,6 +151,13 @@ public class MapperService extends AbstractIndexComponent { return this.hasNested; } + /** + * Returns true if the "_all" field is enabled for the type + */ + public boolean allEnabled() { + return this.allEnabled; + } + /** * returns an immutable iterator over current document mappers. 
* @@ -371,6 +376,7 @@ public class MapperService extends AbstractIndexComponent { this.hasNested = hasNested; this.fullPathObjectMappers = fullPathObjectMappers; this.parentTypes = parentTypes; + this.allEnabled = mapper.allFieldMapper().enabled(); assert assertSerialization(newMapper); assert assertMappersShareSameFieldType(); @@ -653,4 +659,5 @@ public class MapperService extends AbstractIndexComponent { return defaultAnalyzer; } } + } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 886b93fcf0e..b9bc3a2860a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -815,7 +815,7 @@ public class NumberFieldMapper extends FieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexed(); Query query = type.rangeQuery(name(), lowerTerm, upperTerm, includeLower, includeUpper); if (boost() != 1f) { @@ -836,7 +836,7 @@ public class NumberFieldMapper extends FieldMapper { } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { if (value == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index e063629bd61..60c264e5706 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -44,9 +44,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -/** - * - */ public class ObjectMapper extends Mapper implements Cloneable { public static final String CONTENT_TYPE = "object"; @@ -325,7 +322,7 @@ public class ObjectMapper extends Mapper implements Cloneable { super(name); assert settings != null; Version indexCreatedVersion = Version.indexCreated(settings); - if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_alpha6)) { + if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_beta1)) { if (name.isEmpty()) { throw new IllegalArgumentException("name cannot be empty string"); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index bf343a6e0aa..0ae7ce33581 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -40,9 +40,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; -/** - * - */ public class RootObjectMapper extends ObjectMapper { public static class Defaults { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index aa3e78b8ee0..14978bc6728 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -32,9 +32,6 @@ import java.util.Map; import 
static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -/** - * - */ public class RoutingFieldMapper extends MetadataFieldMapper { public static final String NAME = "_routing"; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index 3608da30f76..6db82f7ff72 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -233,7 +233,7 @@ public class ScaledFloatFieldMapper extends FieldMapper { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexed(); Long lo = null; if (lowerTerm != null) { @@ -288,7 +288,7 @@ public class ScaledFloatFieldMapper extends FieldMapper { } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { if (value == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 63d4d958b36..defe0fa8cde 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -45,17 +45,16 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -/** - * - */ public class SourceFieldMapper extends MetadataFieldMapper { public static final String NAME = "_source"; public static final String CONTENT_TYPE = "_source"; + private final Function<Map<String, Object>, Map<String, Object>> filter; public static class Defaults { public static final String NAME = SourceFieldMapper.NAME; @@ -190,6 +189,8 @@ public class SourceFieldMapper extends MetadataFieldMapper { this.enabled = enabled; this.includes = includes; this.excludes = excludes; + final boolean filtered = (includes != null && includes.length > 0) || (excludes != null && excludes.length > 0); + this.filter = enabled && filtered && fieldType().stored() ? XContentMapValues.filter(includes, excludes) : null; this.complete = enabled && includes == null && excludes == null; } @@ -239,12 +240,10 @@ public class SourceFieldMapper extends MetadataFieldMapper { return; } - boolean filtered = (includes != null && includes.length > 0) || (excludes != null && excludes.length > 0); - if (filtered) { + if (filter != null) { // we don't update the context source if we filter, we want to keep it as is...
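// the include/exclude function is built once in the constructor (the filter field above) and reused here, so each filtered document only pays for applying the precomputed function rather than re-deriving it from the includes/excludes arrays on every parse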
- Tuple> mapTuple = XContentHelper.convertToMap(source, true); - Map filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); + Map filteredSource = filter.apply(mapTuple.v2()); BytesStreamOutput bStream = new BytesStreamOutput(); XContentType contentType = mapTuple.v1(); XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index 074ce8829e3..14f6e9a8587 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; -/** - * - */ public class SourceToParse { public static SourceToParse source(String index, String type, String id, BytesReference source) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java b/core/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java index f37ac8817ee..da1304d9b42 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/StrictDynamicMappingException.java @@ -23,8 +23,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - */ public class StrictDynamicMappingException extends MapperParsingException { public StrictDynamicMappingException(String path, String fieldName) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java index 9450c2a5b97..e8a11fc5d47 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java @@ -46,6 +46,7 @@ public abstract class StringFieldType extends TermBasedFieldType { super(ref); } + @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexed(); BytesRef[] bytesRefs = new BytesRef[values.size()]; @@ -85,7 +86,7 @@ public abstract class StringFieldType extends TermBasedFieldType { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexed(); return new TermRangeQuery(name(), lowerTerm == null ? 
null : indexedValueForSearch(lowerTerm), diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TTLFieldMapper.java index f95f42156e1..fcb2fac9268 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TTLFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TTLFieldMapper.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.AlreadyExpiredException; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Date; @@ -139,15 +138,9 @@ public class TTLFieldMapper extends MetadataFieldMapper { // Overrides valueForSearch to display live value of remaining ttl @Override - public Object valueForSearch(Object value) { - long now; - SearchContext searchContext = SearchContext.current(); - if (searchContext != null) { - now = searchContext.nowInMillis(); - } else { - now = System.currentTimeMillis(); - } - Long val = (Long) super.valueForSearch(value); + public Object valueForDisplay(Object value) { + final long now = System.currentTimeMillis(); + Long val = (Long) super.valueForDisplay(value); return val - now; } } @@ -177,11 +170,6 @@ public class TTLFieldMapper extends MetadataFieldMapper { return this.defaultTTL; } - // Other implementation for realtime get display - public Object valueForSearch(long expirationTime) { - return expirationTime - System.currentTimeMillis(); - } - @Override public void preParse(ParseContext context) throws IOException { } @@ -273,4 +261,5 @@ public class TTLFieldMapper extends MetadataFieldMapper { } } } + } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TimestampFieldMapper.java index d57d2f89c6f..d933794efbc 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TimestampFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TimestampFieldMapper.java @@ -179,7 +179,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { } @Override - public Object valueForSearch(Object value) { + public Object valueForDisplay(Object value) { return value; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 0889fab6636..0e7df788a9f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -25,6 +25,9 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermContext; +import org.apache.lucene.queries.TermsQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -41,13 +44,13 @@ import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; -/** - * - */ public class TypeFieldMapper extends 
MetadataFieldMapper { public static final String NAME = "_type"; @@ -147,7 +150,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { if (indexOptions() == IndexOptions.NONE) { throw new AssertionError(); } - return new TypeQuery(indexedValueForSearch(value)); + return new TypesQuery(indexedValueForSearch(value)); } @Override @@ -164,26 +167,55 @@ public class TypeFieldMapper extends MetadataFieldMapper { } } - public static class TypeQuery extends Query { + /** + * Specialization for a disjunction over many _type values + */ + public static class TypesQuery extends Query { + // Same threshold as TermsQuery + private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16; - private final BytesRef type; + private final BytesRef[] types; - public TypeQuery(BytesRef type) { - this.type = Objects.requireNonNull(type); + public TypesQuery(BytesRef... types) { + if (types == null) { + throw new NullPointerException("types cannot be null."); + } + if (types.length == 0) { + throw new IllegalArgumentException("types must contain at least one value."); + } + this.types = types; } @Override public Query rewrite(IndexReader reader) throws IOException { - Term term = new Term(CONTENT_TYPE, type); - TermContext context = TermContext.build(reader.getContext(), term); - if (context.docFreq() == reader.maxDoc()) { - // All docs have the same type. - // Using a match_all query will help Lucene perform some optimizations - // For instance, match_all queries as filter clauses are automatically removed - return new MatchAllDocsQuery(); - } else { - return new ConstantScoreQuery(new TermQuery(term, context)); + final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount()); + if (types.length <= threshold) { + Set<BytesRef> uniqueTypes = new HashSet<>(); + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + int totalDocFreq = 0; + for (BytesRef type : types) { + if (uniqueTypes.add(type)) { + Term term = new Term(CONTENT_TYPE, type); + TermContext context = TermContext.build(reader.getContext(), term); + if (context.docFreq() == 0) { + // this _type is not present in the reader + continue; + } + totalDocFreq += context.docFreq(); + // strict equality should be enough here; >= is only a defensive check
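+ // _type is single-valued, so the doc counts of distinct types never overlap; once the summed doc freqs reach maxDoc every document matches one of the requested types and the disjunction below can be rewritten to a match_all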
+ if (totalDocFreq >= reader.maxDoc()) { + assert totalDocFreq == reader.maxDoc(); + // Matches all docs since _type is a single value field + // Using a match_all query will help Lucene perform some optimizations + // For instance, match_all queries as filter clauses are automatically removed + return new MatchAllDocsQuery(); + } + bq.add(new TermQuery(term, context), BooleanClause.Occur.SHOULD); + } + } + return new ConstantScoreQuery(bq.build()); } + return new TermsQuery(CONTENT_TYPE, types); } @Override @@ -191,20 +223,26 @@ public class TypeFieldMapper extends MetadataFieldMapper { if (sameClassAs(obj) == false) { return false; } - TypeQuery that = (TypeQuery) obj; - return type.equals(that.type); + TypesQuery that = (TypesQuery) obj; + return Arrays.equals(types, that.types); } @Override public int hashCode() { - return 31 * classHash() + type.hashCode(); + return 31 * classHash() + Arrays.hashCode(types); } @Override public String toString(String field) { - return "_type:" + type; + StringBuilder builder = new StringBuilder(); + for (BytesRef type : types) { + if (builder.length() > 0) { + builder.append(' '); + } + builder.append(new Term(CONTENT_TYPE, type).toString()); + } + return builder.toString(); } - } private TypeFieldMapper(Settings indexSettings, MappedFieldType existing) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index f192efc24a6..97828e2bfab 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -47,9 +47,6 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIn import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; -/** - * - */ public class TypeParsers { public static final String DOC_VALUES = "doc_values"; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java index 2a8938b4ab7..3f6ca1b7f5a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Uid.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Uid.java @@ -21,16 +21,11 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.common.lucene.BytesRefs; import java.util.Collection; import java.util.Collections; -import java.util.List; -/** - * - */ public final class Uid { public static final char DELIMITER = '#'; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java index f27fa30b91c..ee6d76c2480 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java @@ -33,9 +33,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class UidFieldMapper extends MetadataFieldMapper { public static final String NAME = "_uid"; diff --git a/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java index ee8c08f00d0..845b035623d 100644 --- a/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java +++ 
b/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - * - */ public class MergeStats implements Streamable, ToXContent { private long total; diff --git a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index c75ba1fda99..ef94ff16cf8 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -49,6 +51,8 @@ import java.util.Optional; public class FuzzyQueryBuilder extends AbstractQueryBuilder implements MultiTermQueryBuilder { public static final String NAME = "fuzzy"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(FuzzyQueryBuilder.class)); + /** Default maximum edit distance. Defaults to AUTO. */ public static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO; @@ -151,6 +155,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder i * @param value The value of the term */ public FuzzyQueryBuilder(String fieldName, Object value) { + DEPRECATION_LOGGER.deprecated("{} query is deprecated. Instead use the [match] query with fuzziness parameter", NAME); if (Strings.isEmpty(fieldName)) { throw new IllegalArgumentException("field name cannot be null or empty"); } diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index fe5d5664123..1cfe2acb246 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -299,7 +299,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder frozen = new SetOnce<>(); public void setTypes(String... 
types) { this.types = types; @@ -86,11 +98,12 @@ public class QueryShardContext extends QueryRewriteContext { private NestedScope nestedScope; private boolean isFilter; - public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService, - MapperService mapperService, SimilarityService similarityService, ScriptService scriptService, - final IndicesQueriesRegistry indicesQueriesRegistry, Client client, - IndexReader reader, ClusterState clusterState) { - super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState); + public QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, + IndexFieldDataService indexFieldDataService, MapperService mapperService, SimilarityService similarityService, + ScriptService scriptService, final IndicesQueriesRegistry indicesQueriesRegistry, Client client, + IndexReader reader, ClusterState clusterState, LongSupplier nowInMillis) { + super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState, nowInMillis); + this.shardId = shardId; this.indexSettings = indexSettings; this.similarityService = similarityService; this.mapperService = mapperService; @@ -99,12 +112,13 @@ public class QueryShardContext extends QueryRewriteContext { this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); this.indicesQueriesRegistry = indicesQueriesRegistry; this.nestedScope = new NestedScope(); + } public QueryShardContext(QueryShardContext source) { - this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, + this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.client, - source.reader, source.clusterState); + source.reader, source.clusterState, source.nowInMillis); this.types = source.getTypes(); } @@ -180,6 +194,10 @@ public class QueryShardContext extends QueryRewriteContext { this.isFilter = isFilter; } + /** + * Returns all the fields that match a given pattern. If prefixed with a + * type then the fields will be returned with a type prefix. 
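+ * For example, the pattern "*" resolves to every mapped field name.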
+ */ public Collection<String> simpleMatchToIndexNames(String pattern) { return mapperService.simpleMatchToIndexNames(pattern); } @@ -250,24 +268,12 @@ public class QueryShardContext extends QueryRewriteContext { private SearchLookup lookup = null; public SearchLookup lookup() { - SearchContext current = SearchContext.current(); - if (current != null) { - return current.lookup(); - } if (lookup == null) { - lookup = new SearchLookup(getMapperService(), indexFieldDataService, null); + lookup = new SearchLookup(getMapperService(), indexFieldDataService, types); } return lookup; } - public long nowInMillis() { - SearchContext current = SearchContext.current(); - if (current != null) { - return current.nowInMillis(); - } - return System.currentTimeMillis(); - } - public NestedScope nestedScope() { return nestedScope; } @@ -327,4 +333,99 @@ public class QueryShardContext extends QueryRewriteContext { public final Index index() { return indexSettings.getIndex(); } + + /** + * Compiles (or retrieves from cache) and binds the parameters to the + * provided script + */ + public final SearchScript getSearchScript(Script script, ScriptContext context) { + failIfFrozen(); + return scriptService.search(lookup(), script, context); + } + /** + * Returns a lazily created {@link SearchScript} that is compiled immediately but can be pulled later once all + * parameters are available. + */ + public final Function<Map<String, Object>, SearchScript> getLazySearchScript(Script script, ScriptContext context) { + failIfFrozen(); + CompiledScript compile = scriptService.compile(script, context, script.getOptions()); + return (p) -> scriptService.search(lookup(), compile, p); + } + + /** + * Compiles (or retrieves from cache) and binds the parameters to the + * provided script + */ + public final ExecutableScript getExecutableScript(Script script, ScriptContext context) { + failIfFrozen(); + return scriptService.executable(script, context); + } + + /** + * Returns a lazily created {@link ExecutableScript} that is compiled immediately but can be pulled later once all + * parameters are available. + */ + public final Function<Map<String, Object>, ExecutableScript> getLazyExecutableScript(Script script, ScriptContext context) { + failIfFrozen(); + CompiledScript executable = scriptService.compile(script, context, script.getOptions()); + return (p) -> scriptService.executable(executable, p); + } + + /** + * If this method is called, the query context will throw an exception whenever a method is accessed + * that could yield different results across executions, like {@link #getTemplateBytes(Script)} + */ + public final void freezeContext() { + this.frozen.set(Boolean.TRUE); + } + + /** + * This method fails if {@link #freezeContext()} has been called on this + * context before. It is used to seal the context. + * + * This method and all methods that call it should be final to ensure that + * setting the request as not cacheable and the freezing behaviour of this + * class cannot be bypassed. This is important so we can trust when this + * class says a request can be cached. + */ + protected final void failIfFrozen() { + this.cachable = false; + if (frozen.get() == Boolean.TRUE) { + throw new IllegalArgumentException("features that prevent cachability are disabled on this context"); + } else { + assert frozen.get() == null : frozen.get(); + } + } + + @Override + public final BytesReference getTemplateBytes(Script template) { + failIfFrozen(); + return super.getTemplateBytes(template); + } + + /** + * Returns true iff the result of the processed search request is cacheable,
otherwise false. + */ + public final boolean isCachable() { + return cachable; + } + + /** + * Returns the shard ID this context was created for. + */ + public int getShardId() { + return shardId; + } + + @Override + public final long nowInMillis() { + failIfFrozen(); + return super.nowInMillis(); + } + + @Override + public Client getClient() { + failIfFrozen(); // if somebody uses a terms filter with lookup, for instance, the request can't be cached... + return super.getClient(); + } } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 807343237d2..08318874df2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,17 +37,30 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.IpFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.ScaledFloatFieldMapper; +import org.elasticsearch.index.mapper.StringFieldMapper; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TimestampFieldMapper; import org.elasticsearch.index.query.support.QueryParsers; import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.TreeMap; /** @@ -59,9 +73,10 @@ import java.util.TreeMap; public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> { public static final String NAME = "query_string"; + public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099); + public static final boolean DEFAULT_AUTO_GENERATE_PHRASE_QUERIES = false; public static final int DEFAULT_MAX_DETERMINED_STATES = Operations.DEFAULT_MAX_DETERMINIZED_STATES; - public static final boolean DEFAULT_LOWERCASE_EXPANDED_TERMS = true; public static final boolean DEFAULT_ENABLE_POSITION_INCREMENTS = true; public static final boolean DEFAULT_ESCAPE = false; public static final boolean DEFAULT_USE_DIS_MAX = true; @@ -71,7 +86,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> { + private static final Set<String> ALLOWED_QUERY_MAPPER_TYPES; + + static { + ALLOWED_QUERY_MAPPER_TYPES = new HashSet<>(); + ALLOWED_QUERY_MAPPER_TYPES.add(DateFieldMapper.CONTENT_TYPE); + ALLOWED_QUERY_MAPPER_TYPES.add(IpFieldMapper.CONTENT_TYPE); + ALLOWED_QUERY_MAPPER_TYPES.add(KeywordFieldMapper.CONTENT_TYPE); + for (NumberFieldMapper.NumberType nt :
NumberFieldMapper.NumberType.values()) { + ALLOWED_QUERY_MAPPER_TYPES.add(nt.typeName()); + } + ALLOWED_QUERY_MAPPER_TYPES.add(ScaledFloatFieldMapper.CONTENT_TYPE); + ALLOWED_QUERY_MAPPER_TYPES.add(StringFieldMapper.CONTENT_TYPE); + ALLOWED_QUERY_MAPPER_TYPES.add(TextFieldMapper.CONTENT_TYPE); + ALLOWED_QUERY_MAPPER_TYPES.add(TimestampFieldMapper.CONTENT_TYPE); + } private final String queryString; @@ -126,12 +161,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue. - */ - public QueryStringQueryBuilder lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - return this; - } - - public boolean lowercaseExpandedTerms() { - return this.lowercaseExpandedTerms; - } - /** * Set to true to enable position increments in result query. Defaults to * true. @@ -473,6 +527,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue to enable analysis on wildcard and prefix queries. */ @@ -485,11 +544,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder{@value #DEFAULT_SPLIT_ON_WHITESPACE}. + */ + public QueryStringQueryBuilder splitOnWhitespace(boolean value) { + this.splitOnWhitespace = value; + return this; + } + + public boolean splitOnWhitespace() { + return splitOnWhitespace; + } + @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); @@ -597,7 +655,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder fieldsAndWeights = new HashMap<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -707,7 +767,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder allQueryableDefaultFields(QueryShardContext context) { + Collection allFields = context.simpleMatchToIndexNames("*"); + Map fields = new HashMap<>(); + for (String fieldName : allFields) { + if (MapperService.isMetadataField(fieldName)) { + // Ignore our metadata fields + continue; + } + MappedFieldType mft = context.fieldMapper(fieldName); + assert mft != null : "should never have a null mapper for an existing field"; + + // Ignore fields that are not in the allowed mapper types. Some + // types do not support term queries, and thus we cannot generate + // a special query for them. 
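+ // (for instance, geo and binary fields have no term representation to query, so expanding onto them would only produce per-field errors)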
+ String mappingType = mft.typeName(); + if (ALLOWED_QUERY_MAPPER_TYPES.contains(mappingType)) { + fields.put(fieldName, 1.0f); + } + } + return fields; } @Override @@ -849,18 +944,43 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder resolvedFields = new TreeMap<>(); - for (Map.Entry fieldsEntry : fieldsAndWeights.entrySet()) { - String fieldName = fieldsEntry.getKey(); - Float weight = fieldsEntry.getValue(); - if (Regex.isSimpleMatchPattern(fieldName)) { - for (String resolvedFieldName : context.getMapperService().simpleMatchToIndexNames(fieldName)) { - resolvedFields.put(resolvedFieldName, weight); + + if ((useAllFields != null && useAllFields) && (fieldsAndWeights.size() != 0 || this.defaultField != null)) { + throw addValidationError("cannot use [all_fields] parameter in conjunction with [default_field] or [fields]", null); + } + + // If explicitly required to use all fields, use all fields, OR: + // Automatically determine the fields (to replace the _all field) if all of the following are true: + // - The _all field is disabled, + // - and the default_field has not been changed in the settings + // - and default_field is not specified in the request + // - and no fields are specified in the request + if ((this.useAllFields != null && this.useAllFields) || + (context.getMapperService().allEnabled() == false && + "_all".equals(context.defaultField()) && + this.defaultField == null && + this.fieldsAndWeights.size() == 0)) { + // Use the automatically determined expansion of all queryable fields + resolvedFields = allQueryableDefaultFields(context); + // Automatically set leniency to "true" so mismatched fields don't cause exceptions + qpSettings.lenient(true); + } else { + qpSettings.defaultField(this.defaultField == null ? context.defaultField() : this.defaultField); + + for (Map.Entry fieldsEntry : fieldsAndWeights.entrySet()) { + String fieldName = fieldsEntry.getKey(); + Float weight = fieldsEntry.getValue(); + if (Regex.isSimpleMatchPattern(fieldName)) { + for (String resolvedFieldName : context.getMapperService().simpleMatchToIndexNames(fieldName)) { + resolvedFields.put(resolvedFieldName, weight); + } + } else { + resolvedFields.put(fieldName, weight); } - } else { - resolvedFields.put(fieldName, weight); } + qpSettings.lenient(lenient == null ? context.queryStringLenient() : lenient); } qpSettings.fieldsAndWeights(resolvedFields); qpSettings.defaultOperator(defaultOperator.toQueryParserOperator()); @@ -890,9 +1010,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder i return this.timeZone == null ? null : this.timeZone.getID(); } + DateTimeZone getDateTimeZone() { // for testing + return timeZone; + } + /** * In case of format field, we can parse the from/to fields using this time format */ @@ -278,6 +282,13 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i return this.format == null ? null : this.format.format(); } + DateMathParser getForceDateParser() { // pkg private for testing + if (this.format != null) { + return new DateMathParser(this.format); + } + return null; + } + @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); @@ -406,7 +417,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i } else { DateMathParser dateMathParser = format == null ? 
null : new DateMathParser(format); return fieldType.isFieldWithinQuery(queryRewriteContext.getIndexReader(), from, to, includeLower, - includeUpper, timeZone, dateMathParser); + includeUpper, timeZone, dateMathParser, queryRewriteContext); } } @@ -440,26 +451,20 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i MappedFieldType mapper = context.fieldMapper(this.fieldName); if (mapper != null) { if (mapper instanceof LegacyDateFieldMapper.DateFieldType) { - DateMathParser forcedDateParser = null; - if (this.format != null) { - forcedDateParser = new DateMathParser(this.format); - } + query = ((LegacyDateFieldMapper.DateFieldType) mapper).rangeQuery(from, to, includeLower, includeUpper, - timeZone, forcedDateParser); + timeZone, getForceDateParser(), context); } else if (mapper instanceof DateFieldMapper.DateFieldType) { - DateMathParser forcedDateParser = null; - if (this.format != null) { - forcedDateParser = new DateMathParser(this.format); - } + query = ((DateFieldMapper.DateFieldType) mapper).rangeQuery(from, to, includeLower, includeUpper, - timeZone, forcedDateParser); + timeZone, getForceDateParser(), context); } else { if (timeZone != null) { throw new QueryShardException(context, "[range] time_zone can not be applied to non date field [" + fieldName + "]"); } //LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well? - query = mapper.rangeQuery(from, to, includeLower, includeUpper); + query = mapper.rangeQuery(from, to, includeLower, includeUpper, context); } } else { if (timeZone != null) { diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 3ff924b28db..444d79491cb 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -33,15 +33,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.SearchScript; -import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; -import java.util.Collections; -import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -84,7 +79,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder @Override protected void doXContent(XContentBuilder builder, Params builderParams) throws IOException { builder.startObject(NAME); - builder.field(ScriptField.SCRIPT.getPreferredName(), script); + builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName(), script); printBoostAndQueryName(builder); builder.endObject(); } @@ -105,7 +100,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder } else if (parseContext.isDeprecatedSetting(currentFieldName)) { // skip } else if (token == XContentParser.Token.START_OBJECT) { - if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + if (parseContext.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); @@ -115,7 
+110,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder queryName = parser.text(); } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { boost = parser.floatValue(); - } else if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + } else if (parseContext.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { script = Script.parse(parser, parseContext.getParseFieldMatcher(), parseContext.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); @@ -134,18 +129,17 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder @Override protected Query doToQuery(QueryShardContext context) throws IOException { - return new ScriptQuery(script, context.getScriptService(), context.lookup()); + return new ScriptQuery(script, context.getSearchScript(script, ScriptContext.Standard.SEARCH)); } static class ScriptQuery extends Query { - private final Script script; + final Script script; + final SearchScript searchScript; - private final SearchScript searchScript; - - public ScriptQuery(Script script, ScriptService scriptService, SearchLookup searchLookup) { + public ScriptQuery(Script script, SearchScript searchScript) { this.script = script; - this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); + this.searchScript = searchScript; } @Override @@ -159,17 +153,23 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder @Override public boolean equals(Object obj) { - if (this == obj) + // TODO: Do this if/when we can assume scripts are pure functions + // and they have a reliable equals impl + /*if (this == obj) return true; if (sameClassAs(obj) == false) return false; ScriptQuery other = (ScriptQuery) obj; - return Objects.equals(script, other.script); + return Objects.equals(script, other.script);*/ + return this == obj; } @Override public int hashCode() { - return Objects.hash(classHash(), script); + // TODO: Do this if/when we can assume scripts are pure functions + // and they have a reliable equals impl + // return Objects.hash(classHash(), script); + return System.identityHashCode(this); } @Override @@ -216,4 +216,6 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder protected boolean doEquals(ScriptQueryBuilder other) { return Objects.equals(script, other.script); } + + } diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java index 151e924ad16..4a49405ec2f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java @@ -30,10 +30,10 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.mapper.MappedFieldType; import java.io.IOException; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.List; @@ -98,14 +98,13 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp */ @Override public Query newFuzzyQuery(String text, int fuzziness) { - if (settings.lowercaseExpandedTerms()) { - text = text.toLowerCase(settings.locale()); - } 
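+ // the Locale-based lowercasing removed above is replaced by per-field normalization through the analyzer chain (getAnalyzer().normalize(...) below)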
BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.setDisableCoord(true); for (Map.Entry entry : weights.entrySet()) { + final String fieldName = entry.getKey(); try { - Query query = new FuzzyQuery(new Term(entry.getKey(), text), fuzziness); + final BytesRef term = getAnalyzer().normalize(fieldName, text); + Query query = new FuzzyQuery(new Term(fieldName, term), fuzziness); bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD); } catch (RuntimeException e) { rethrowUnlessLenient(e); @@ -120,9 +119,18 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp bq.setDisableCoord(true); for (Map.Entry entry : weights.entrySet()) { try { - Query q = createPhraseQuery(entry.getKey(), text, slop); + String field = entry.getKey(); + if (settings.quoteFieldSuffix() != null) { + String quoteField = field + settings.quoteFieldSuffix(); + MappedFieldType quotedFieldType = context.fieldMapper(quoteField); + if (quotedFieldType != null) { + field = quoteField; + } + } + Float boost = entry.getValue(); + Query q = createPhraseQuery(field, text, slop); if (q != null) { - bq.add(wrapWithBoost(q, entry.getValue()), BooleanClause.Occur.SHOULD); + bq.add(wrapWithBoost(q, boost), BooleanClause.Occur.SHOULD); } } catch (RuntimeException e) { rethrowUnlessLenient(e); @@ -137,20 +145,19 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp */ @Override public Query newPrefixQuery(String text) { - if (settings.lowercaseExpandedTerms()) { - text = text.toLowerCase(settings.locale()); - } BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.setDisableCoord(true); for (Map.Entry entry : weights.entrySet()) { + final String fieldName = entry.getKey(); try { if (settings.analyzeWildcard()) { - Query analyzedQuery = newPossiblyAnalyzedQuery(entry.getKey(), text); + Query analyzedQuery = newPossiblyAnalyzedQuery(fieldName, text); if (analyzedQuery != null) { bq.add(wrapWithBoost(analyzedQuery, entry.getValue()), BooleanClause.Occur.SHOULD); } } else { - Query query = new PrefixQuery(new Term(entry.getKey(), text)); + Term term = new Term(fieldName, getAnalyzer().normalize(fieldName, text)); + Query query = new PrefixQuery(term); bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD); } } catch (RuntimeException e) { @@ -173,11 +180,11 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp * of {@code TermQuery}s and {@code PrefixQuery}s */ private Query newPossiblyAnalyzedQuery(String field, String termStr) { - List> tlist = new ArrayList<> (); + List> tlist = new ArrayList<> (); // get Analyzer from superclass and tokenize the term try (TokenStream source = getAnalyzer().tokenStream(field, termStr)) { source.reset(); - List currentPos = new ArrayList<>(); + List currentPos = new ArrayList<>(); CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); PositionIncrementAttribute posAtt = source.addAttribute(PositionIncrementAttribute.class); @@ -188,7 +195,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp tlist.add(currentPos); currentPos = new ArrayList<>(); } - currentPos.add(termAtt.toString()); + final BytesRef term = getAnalyzer().normalize(field, termAtt.toString()); + currentPos.add(term); hasMoreTokens = source.incrementToken(); } if (currentPos.isEmpty() == false) { @@ -214,7 +222,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp // build a boolean query with prefix on the last position 
only. BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int pos = 0; pos < tlist.size(); pos++) { - List plist = tlist.get(pos); + List plist = tlist.get(pos); boolean isLastPos = (pos == tlist.size()-1); Query posQuery; if (plist.size() == 1) { @@ -232,7 +240,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp posQuery = new SynonymQuery(terms); } else { BooleanQuery.Builder innerBuilder = new BooleanQuery.Builder(); - for (String token : plist) { + for (BytesRef token : plist) { innerBuilder.add(new BooleanClause(new PrefixQuery(new Term(field, token)), BooleanClause.Occur.SHOULD)); } @@ -248,14 +256,12 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp * their default values */ static class Settings { - /** Locale to use for parsing. */ - private Locale locale = SimpleQueryStringBuilder.DEFAULT_LOCALE; - /** Specifies whether parsed terms should be lowercased. */ - private boolean lowercaseExpandedTerms = SimpleQueryStringBuilder.DEFAULT_LOWERCASE_EXPANDED_TERMS; /** Specifies whether lenient query parsing should be used. */ private boolean lenient = SimpleQueryStringBuilder.DEFAULT_LENIENT; /** Specifies whether wildcards should be analyzed. */ private boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD; + /** Specifies a suffix, if any, to apply to field names for phrase matching. */ + private String quoteFieldSuffix = null; /** * Generates default {@link Settings} object (uses ROOT locale, does @@ -264,34 +270,10 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp public Settings() { } - public Settings(Locale locale, Boolean lowercaseExpandedTerms, Boolean lenient, Boolean analyzeWildcard) { - this.locale = locale; - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - this.lenient = lenient; - this.analyzeWildcard = analyzeWildcard; - } - - /** Specifies the locale to use for parsing, Locale.ROOT by default. */ - public void locale(Locale locale) { - this.locale = (locale != null) ? locale : SimpleQueryStringBuilder.DEFAULT_LOCALE; - } - - /** Returns the locale to use for parsing. */ - public Locale locale() { - return this.locale; - } - - /** - * Specifies whether to lowercase parse terms, defaults to true if - * unset. - */ - public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - } - - /** Returns whether to lowercase parse terms. */ - public boolean lowercaseExpandedTerms() { - return this.lowercaseExpandedTerms; + public Settings(Settings other) { + this.lenient = other.lenient; + this.analyzeWildcard = other.analyzeWildcard; + this.quoteFieldSuffix = other.quoteFieldSuffix; } /** Specifies whether to use lenient parsing, defaults to false. */ @@ -314,12 +296,24 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp return analyzeWildcard; } + /** + * Set the suffix to append to field names for phrase matching. + */ + public void quoteFieldSuffix(String suffix) { + this.quoteFieldSuffix = suffix; + } + + /** + * Return the suffix to append for phrase matching, or {@code null} if + * no suffix should be appended. + */ + public String quoteFieldSuffix() { + return quoteFieldSuffix; + } + @Override public int hashCode() { - // checking the return value of toLanguageTag() for locales only. 
- // For further reasoning see - // https://issues.apache.org/jira/browse/LUCENE-4021 - return Objects.hash(locale.toLanguageTag(), lowercaseExpandedTerms, lenient, analyzeWildcard); + return Objects.hash(lenient, analyzeWildcard, quoteFieldSuffix); } @Override @@ -331,14 +325,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp return false; } Settings other = (Settings) obj; - - // checking the return value of toLanguageTag() for locales only. - // For further reasoning see - // https://issues.apache.org/jira/browse/LUCENE-4021 - return (Objects.equals(locale.toLanguageTag(), other.locale.toLanguageTag()) - && Objects.equals(lowercaseExpandedTerms, other.lowercaseExpandedTerms) - && Objects.equals(lenient, other.lenient) - && Objects.equals(analyzeWildcard, other.analyzeWildcard)); + return Objects.equals(lenient, other.lenient) && Objects.equals(analyzeWildcard, other.analyzeWildcard) + && Objects.equals(quoteFieldSuffix, other.quoteFieldSuffix); } } } diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index fbe5964f2a0..5bc04d13f8b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -78,10 +79,6 @@ import java.util.TreeMap; * > online documentation. 
*/ public class SimpleQueryStringBuilder extends AbstractQueryBuilder { - /** Default locale used for parsing.*/ - public static final Locale DEFAULT_LOCALE = Locale.ROOT; - /** Default for lowercasing parsed terms.*/ - public static final boolean DEFAULT_LOWERCASE_EXPANDED_TERMS = true; /** Default for using lenient query parsing.*/ public static final boolean DEFAULT_LENIENT = false; /** Default for wildcard analysis.*/ @@ -94,16 +91,22 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder resolvedFieldsAndWeights = new TreeMap<>(); - // Use the default field if no fields specified - if (fieldsAndWeights.isEmpty()) { - resolvedFieldsAndWeights.put(resolveIndexName(context.defaultField(), context), AbstractQueryBuilder.DEFAULT_BOOST); + + if ((useAllFields != null && useAllFields) && (fieldsAndWeights.size() != 0)) { + throw addValidationError("cannot use [all_fields] parameter in conjunction with [fields]", null); + } + + // If explicitly required to use all fields, use all fields, OR: + // Automatically determine the fields (to replace the _all field) if all of the following are true: + // - The _all field is disabled, + // - and the default_field has not been changed in the settings + // - and no fields are specified in the request + Settings newSettings = new Settings(settings); + if ((this.useAllFields != null && this.useAllFields) || + (context.getMapperService().allEnabled() == false && + "_all".equals(context.defaultField()) && + this.fieldsAndWeights.isEmpty())) { + resolvedFieldsAndWeights = QueryStringQueryBuilder.allQueryableDefaultFields(context); + // Need to use lenient mode when using "all-mode" so exceptions aren't thrown due to mismatched types + newSettings.lenient(true); } else { - for (Map.Entry fieldEntry : fieldsAndWeights.entrySet()) { - if (Regex.isSimpleMatchPattern(fieldEntry.getKey())) { - for (String fieldName : context.getMapperService().simpleMatchToIndexNames(fieldEntry.getKey())) { - resolvedFieldsAndWeights.put(fieldName, fieldEntry.getValue()); + // Use the default field if no fields specified + if (fieldsAndWeights.isEmpty()) { + resolvedFieldsAndWeights.put(resolveIndexName(context.defaultField(), context), AbstractQueryBuilder.DEFAULT_BOOST); + } else { + for (Map.Entry fieldEntry : fieldsAndWeights.entrySet()) { + if (Regex.isSimpleMatchPattern(fieldEntry.getKey())) { + for (String fieldName : context.getMapperService().simpleMatchToIndexNames(fieldEntry.getKey())) { + resolvedFieldsAndWeights.put(fieldName, fieldEntry.getValue()); + } + } else { + resolvedFieldsAndWeights.put(resolveIndexName(fieldEntry.getKey(), context), fieldEntry.getValue()); } - } else { - resolvedFieldsAndWeights.put(resolveIndexName(fieldEntry.getKey(), context), fieldEntry.getValue()); } } } @@ -363,7 +403,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder origin = context.nowInMillis(); } else { if (dateFieldType instanceof LegacyDateFieldMapper.DateFieldType) { - origin = ((LegacyDateFieldMapper.DateFieldType) dateFieldType).parseToMilliseconds(originString, false, null, null); + origin = ((LegacyDateFieldMapper.DateFieldType) dateFieldType).parseToMilliseconds(originString, false, null, null, + context); } else { - origin = ((DateFieldMapper.DateFieldType) dateFieldType).parseToMilliseconds(originString, false, null, null); + origin = ((DateFieldMapper.DateFieldType) dateFieldType).parseToMilliseconds(originString, false, null, null, context); } } diff --git 
a/core/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index 4f4a3b54a21..ddf796503b7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -49,12 +49,19 @@ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder uidFieldData = context.getForField(fieldType); return new RandomScoreFunction(this.seed == null ? hash(context.nowInMillis()) : seed, salt, uidFieldData); } - /** - * Get the current shard's id for the seed. Protected because this method doesn't work during certain unit tests and needs to be - * replaced. - */ - int getCurrentShardId() { - return SearchContext.current().indexShard().shardId().id(); - } - private static int hash(long value) { return Long.hashCode(value); } diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index e2fbc5955d7..8a5e950432b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -30,12 +30,10 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import java.io.IOException; -import java.util.Collections; import java.util.Objects; /** @@ -74,7 +72,7 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder queries = new ArrayList<>(); Term[] terms = new Term[blendedFields.length]; float[] blendedBoost = new float[blendedFields.length]; @@ -249,12 +251,20 @@ public class MultiMatchQuery extends MatchQuery { for (FieldAndFieldType ft : blendedFields) { Query query; try { - query = ft.fieldType.termQuery(value, null); + query = ft.fieldType.termQuery(value, context); } catch (IllegalArgumentException e) { // the query expects a certain class of values such as numbers // or ip addresses and the value can't be parsed, so ignore this // field continue; + } catch (ElasticsearchParseException parseException) { + // date fields throw an ElasticsearchParseException with the + // underlying IAE as the cause, ignore this field if that is + // the case + if (parseException.getCause() instanceof IllegalArgumentException) { + continue; + } + throw parseException; } float boost = ft.boost; while (query instanceof BoostQuery) { diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java index 823f882f40a..16e86f083b3 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java @@ -35,8 +35,8 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.MultiGeoPointValues;
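The MultiMatchQuery hunk above skips a blended field whenever its type cannot parse the value, including the case where a date field wraps the underlying IllegalArgumentException in an ElasticsearchParseException. A self-contained sketch of that unwrap-and-skip pattern, with a plain RuntimeException standing in for ElasticsearchParseException:

```java
import java.util.ArrayList;
import java.util.List;

public final class SkipUnparseableSketch {
    // Stand-in for a per-field parse that wraps the low-level failure,
    // the way date fields wrap an IAE in an ElasticsearchParseException.
    static long parse(String value) {
        try {
            return Long.parseLong(value); // NumberFormatException extends IllegalArgumentException
        } catch (IllegalArgumentException e) {
            throw new RuntimeException("failed to parse [" + value + "]", e);
        }
    }

    public static void main(String[] args) {
        List<Long> accepted = new ArrayList<>();
        for (String value : new String[] {"42", "not-a-number", "7"}) {
            try {
                accepted.add(parse(value));
            } catch (RuntimeException e) {
                if (e.getCause() instanceof IllegalArgumentException) {
                    continue; // value is not valid for this field type: skip it, as above
                }
                throw e; // anything else is a real error and must propagate
            }
        }
        System.out.println(accepted); // [42, 7]
    }
}
```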
-import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper; import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -63,7 +63,7 @@ public class GeoDistanceRangeQuery extends Query { public GeoDistanceRangeQuery(GeoPoint point, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper, GeoDistance geoDistance, LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType, - IndexGeoPointFieldData indexFieldData, String optimizeBbox) { + IndexGeoPointFieldData indexFieldData, String optimizeBbox, QueryShardContext context) { this.lat = point.lat(); this.lon = point.lon(); this.geoDistance = geoDistance; @@ -96,7 +96,7 @@ public class GeoDistanceRangeQuery extends Query { boundingBoxFilter = null; } else if ("indexed".equals(optimizeBbox)) { boundingBoxFilter = LegacyIndexedGeoBoundingBoxQuery.create(distanceBoundingCheck.topLeft(), - distanceBoundingCheck.bottomRight(), fieldType); + distanceBoundingCheck.bottomRight(), fieldType, context); distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter } else { throw new IllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java index c3a52cb114e..bb39d752c0e 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoPolygonQuery.java @@ -32,9 +32,6 @@ import org.elasticsearch.index.fielddata.MultiGeoPointValues; import java.io.IOException; import java.util.Arrays; -/** - * - */ public class GeoPolygonQuery extends Query { private final GeoPoint[] points; diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java index 6fdb2a906c5..7b44ac62ede 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/LegacyIndexedGeoBoundingBoxQuery.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper; +import org.elasticsearch.index.query.QueryShardContext; /** * @@ -34,34 +35,34 @@ import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper; public class LegacyIndexedGeoBoundingBoxQuery { public static Query create(GeoPoint topLeft, GeoPoint bottomRight, - LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) { + LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType, QueryShardContext context) { if (!fieldType.isLatLonEnabled()) { throw new IllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldType.name() + "], can't use indexed filter on it"); } //checks to see if bounding box crosses 180 degrees if (topLeft.lon() > bottomRight.lon()) { - return westGeoBoundingBoxFilter(topLeft, bottomRight, fieldType); + return westGeoBoundingBoxFilter(topLeft, bottomRight, fieldType, context); } else { - return eastGeoBoundingBoxFilter(topLeft, bottomRight, fieldType); + return eastGeoBoundingBoxFilter(topLeft, bottomRight, fieldType, context); } } private static 
Query westGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, - LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) { + LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType, QueryShardContext context) { BooleanQuery.Builder filter = new BooleanQuery.Builder(); filter.setMinimumNumberShouldMatch(1); - filter.add(fieldType.lonFieldType().rangeQuery(null, bottomRight.lon(), true, true), Occur.SHOULD); - filter.add(fieldType.lonFieldType().rangeQuery(topLeft.lon(), null, true, true), Occur.SHOULD); - filter.add(fieldType.latFieldType().rangeQuery(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); + filter.add(fieldType.lonFieldType().rangeQuery(null, bottomRight.lon(), true, true, context), Occur.SHOULD); + filter.add(fieldType.lonFieldType().rangeQuery(topLeft.lon(), null, true, true, context), Occur.SHOULD); + filter.add(fieldType.latFieldType().rangeQuery(bottomRight.lat(), topLeft.lat(), true, true, context), Occur.MUST); return new ConstantScoreQuery(filter.build()); } private static Query eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, - LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) { + LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType, QueryShardContext context) { BooleanQuery.Builder filter = new BooleanQuery.Builder(); - filter.add(fieldType.lonFieldType().rangeQuery(topLeft.lon(), bottomRight.lon(), true, true), Occur.MUST); - filter.add(fieldType.latFieldType().rangeQuery(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); + filter.add(fieldType.lonFieldType().rangeQuery(topLeft.lon(), bottomRight.lon(), true, true, context), Occur.MUST); + filter.add(fieldType.latFieldType().rangeQuery(bottomRight.lat(), topLeft.lat(), true, true, context), Occur.MUST); return new ConstantScoreQuery(filter.build()); } } diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java index d514571d0e2..3959a697fd0 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java @@ -32,8 +32,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -/** - */ public class SearchStats implements Streamable, ToXContent { public static class Stats implements Streamable, ToXContent { diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java b/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java index 68a2a30a2e9..716667dbb4d 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java @@ -33,8 +33,6 @@ import java.util.function.Consumer; import static java.util.Collections.emptyMap; -/** - */ public final class ShardSearchStats implements SearchOperationListener { private final StatsHolder totalStats = new StatsHolder(); diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index ffd4fd8504e..dbd14845011 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -66,18 +66,14 @@ public class GlobalCheckpointSyncAction extends TransportReplicationActionemptyList(), Collections.emptyList()); diff 
--git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java index 2a5b3506028..e3b6817f13d 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java @@ -33,9 +33,6 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; -/** - * - */ public class CommitPoints implements Iterable { private final List commitPoints; diff --git a/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java b/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java index bfb95b426b1..f8132d557bb 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java @@ -27,8 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class DocsStats implements Streamable, ToXContent { long count = 0; diff --git a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java index e632c0669f6..1260a3829d4 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java @@ -26,9 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class IllegalIndexShardStateException extends ElasticsearchException { private final IndexShardState currentState; diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 7d56b5e20b6..3bd1b9a9a0b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -343,8 +343,21 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl public void updatePrimaryTerm(final long newTerm) { synchronized (mutex) { if (newTerm != primaryTerm) { - assert shardRouting.primary() == false : "a primary shard should never update it's term. shard: " + shardRouting - + " current term [" + primaryTerm + "] new term [" + newTerm + "]"; + // Note that due to cluster state batching an initializing primary shard can be failed and re-assigned + // in one state, causing its term to be incremented. Note that if both current shard state and new + // shard state are initializing, we could replace the current shard and reinitialize it. It is however + // possible that this shard is being started. This can happen if: + // 1) Shard is post recovery and sends shard started to the master + // 2) Node gets disconnected and rejoins + // 3) Master assigns the shard back to the node + // 4) Master processes the shard started and starts the shard + // 5) The node processes the cluster state where the shard is both started and primary term is incremented. + // + // We could fail the shard in that case, but this will cause it to be removed from the in-sync allocations list + // potentially preventing re-allocation. + assert shardRouting.primary() == false || shardRouting.initializing() == false : + "a started primary shard should never update its term. shard: " + shardRouting + + " current term [" + primaryTerm + "] new term [" + newTerm + "]"; assert newTerm > primaryTerm : "primary terms can only go up.
current [" + primaryTerm + "], new [" + newTerm + "]"; primaryTerm = newTerm; } @@ -529,26 +542,27 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return new Engine.Index(uid, doc, seqNo, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry); } - public void index(Engine.Index index) { + public Engine.IndexResult index(Engine.Index index) { ensureWriteAllowed(index); Engine engine = getEngine(); - index(engine, index); + return index(engine, index); } - private void index(Engine engine, Engine.Index index) { + private Engine.IndexResult index(Engine engine, Engine.Index index) { active.set(true); + final Engine.IndexResult result; index = indexingOperationListeners.preIndex(index); try { if (logger.isTraceEnabled()) { logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); } - engine.index(index); - index.endTime(System.nanoTime()); + result = engine.index(index); } catch (Exception e) { indexingOperationListeners.postIndex(index, e); throw e; } - indexingOperationListeners.postIndex(index, index.isCreated()); + indexingOperationListeners.postIndex(index, result); + return result; } public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) { @@ -571,30 +585,30 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Engine.Operation.Origin origin) { long startTime = System.nanoTime(); - return new Engine.Delete(type, id, uid, seqNo, version, versionType, origin, startTime, false); + return new Engine.Delete(type, id, uid, seqNo, version, versionType, origin, startTime); } - public void delete(Engine.Delete delete) { + public Engine.DeleteResult delete(Engine.Delete delete) { ensureWriteAllowed(delete); Engine engine = getEngine(); - delete(engine, delete); + return delete(engine, delete); } - private void delete(Engine engine, Engine.Delete delete) { + private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) { active.set(true); + final Engine.DeleteResult result; delete = indexingOperationListeners.preDelete(delete); try { if (logger.isTraceEnabled()) { logger.trace("delete [{}]", delete.uid().text()); } - engine.delete(delete); - delete.endTime(System.nanoTime()); + result = engine.delete(delete); } catch (Exception e) { indexingOperationListeners.postDelete(delete, e); throw e; } - - indexingOperationListeners.postDelete(delete); + indexingOperationListeners.postDelete(delete, result); + return result; } public Engine.GetResult get(Engine.Get get) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java index f4e0fb53851..11d9804b100 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardClosedException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardClosedException extends IllegalIndexShardStateException { public IndexShardClosedException(ShardId shardId) { super(shardId, IndexShardState.CLOSED, "Closed"); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java index 
a1665a7d5c1..154b9addab3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardComponent.java @@ -21,9 +21,6 @@ package org.elasticsearch.index.shard; import org.elasticsearch.index.IndexSettings; -/** - * - */ public interface IndexShardComponent { ShardId shardId(); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java index 0e05576f577..7adde5d12c2 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotRecoveringException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardNotRecoveringException extends IllegalIndexShardStateException { public IndexShardNotRecoveringException(ShardId shardId, IndexShardState currentState) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java index ecf62bc6195..ec2fd6b3be5 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardNotStartedException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardNotStartedException extends IllegalIndexShardStateException { public IndexShardNotStartedException(ShardId shardId, IndexShardState currentState) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java index cde14dec173..df435d9a3b5 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationsLock.java @@ -53,6 +53,9 @@ public class IndexShardOperationsLock implements Closeable { @Override public void close() { closed = true; + if (logger.isTraceEnabled()) { + logger.trace("operation lock on [{}] closed", shardId); + } } /** @@ -120,7 +123,7 @@ public class IndexShardOperationsLock implements Closeable { Releasable releasable; try { synchronized (this) { - releasable = tryAcquire(); + releasable = tryAcquire(onAcquired.toString()); if (releasable == null) { // blockOperations is executing, this operation will be retried by blockOperations once it finishes if (delayedOperations == null) { @@ -142,12 +145,18 @@ public class IndexShardOperationsLock implements Closeable { onAcquired.onResponse(releasable); } - @Nullable private Releasable tryAcquire() throws InterruptedException { + @Nullable private Releasable tryAcquire(String resource) throws InterruptedException { if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the untimed tryAcquire methods do not honor the fairness setting AtomicBoolean closed = new AtomicBoolean(); + if (logger.isTraceEnabled()) { + logger.trace("acquired operation lock on [{}] for resource [{}]", shardId, resource); + } return () -> { if (closed.compareAndSet(false, true)) { semaphore.release(1); + if (logger.isTraceEnabled()) { + logger.trace("released operation lock on [{}] for resource [{}]", shardId, resource); + } } }; } diff --git 
a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java index 6c27e4d93e9..93074d41232 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveringException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardRecoveringException extends IllegalIndexShardStateException { public IndexShardRecoveringException(ShardId shardId) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java index 8ed3c95f92a..4e147498800 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardRecoveryException extends ElasticsearchException { public IndexShardRecoveryException(ShardId shardId, String msg, Throwable cause) { super(msg, cause); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java index 043ad892777..bafa14f2e58 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardRelocatedException extends IllegalIndexShardStateException { public IndexShardRelocatedException(ShardId shardId) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java index 05e7cdf8983..d2cff03c846 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardStartedException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class IndexShardStartedException extends IllegalIndexShardStateException { public IndexShardStartedException(ShardId shardId) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index fa4122cabba..d3c6de7136c 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -20,9 +20,6 @@ package org.elasticsearch.index.shard; -/** - * - */ public enum IndexShardState { CREATED((byte) 0), RECOVERING((byte) 1), diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java index 042ddec924e..36f2765222a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java @@ -38,12 +38,17 @@ public interface IndexingOperationListener 
{ } /** - * Called after the indexing operation occurred. + * Called after the indexing operation occurred. Note that this is + * also called when indexing a document did not succeed due to document + * related failures. See {@link #postIndex(Engine.Index, Exception)} + * for engine level failures */ - default void postIndex(Engine.Index index, boolean created) {} + default void postIndex(Engine.Index index, Engine.IndexResult result) {} /** - * Called after the indexing operation occurred with exception. + * Called after the indexing operation occurred with engine level exception. + * See {@link #postIndex(Engine.Index, Engine.IndexResult)} for document + * related failures */ default void postIndex(Engine.Index index, Exception ex) {} @@ -56,12 +61,17 @@ public interface IndexingOperationListener { /** - * Called after the delete operation occurred. + * Called after the delete operation occurred. Note that this is + * also called when deleting a document did not succeed due to document + * related failures. See {@link #postDelete(Engine.Delete, Exception)} + * for engine level failures */ - default void postDelete(Engine.Delete delete) {} + default void postDelete(Engine.Delete delete, Engine.DeleteResult result) {} /** - * Called after the delete operation occurred with exception. + * Called after the delete operation occurred with engine level exception. + * See {@link #postDelete(Engine.Delete, Engine.DeleteResult)} for document + * related failures */ default void postDelete(Engine.Delete delete, Exception ex) {} @@ -91,11 +101,11 @@ public interface IndexingOperationListener { } @Override - public void postIndex(Engine.Index index, boolean created) { + public void postIndex(Engine.Index index, Engine.IndexResult result) { assert index != null; for (IndexingOperationListener listener : listeners) { try { - listener.postIndex(index, created); + listener.postIndex(index, result); } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); } @@ -129,11 +139,11 @@ public interface IndexingOperationListener { } @Override - public void postDelete(Engine.Delete delete) { + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { assert delete != null; for (IndexingOperationListener listener : listeners) { try { - listener.postDelete(delete); + listener.postDelete(delete, result); } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java index 97f9dd2b92f..ba7eafc1a67 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java @@ -32,8 +32,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -/** - */ public class IndexingStats implements Streamable, ToXContent { public static class Stats implements Streamable, ToXContent { diff --git a/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java index f62b8f7fe3c..32868a7368a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java @@ -74,14 +74,18 @@ final class InternalIndexingStats implements 
IndexingOperationListener { } @Override - public void postIndex(Engine.Index index, boolean created) { - if (!index.origin().isRecovery()) { - long took = index.endTime() - index.startTime(); - totalStats.indexMetric.inc(took); - totalStats.indexCurrent.dec(); - StatsHolder typeStats = typeStats(index.type()); - typeStats.indexMetric.inc(took); - typeStats.indexCurrent.dec(); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + if (result.hasFailure() == false) { + if (!index.origin().isRecovery()) { + long took = result.getTook(); + totalStats.indexMetric.inc(took); + totalStats.indexCurrent.dec(); + StatsHolder typeStats = typeStats(index.type()); + typeStats.indexMetric.inc(took); + typeStats.indexCurrent.dec(); + } + } else { + postIndex(index, result.getFailure()); } } @@ -106,14 +110,18 @@ final class InternalIndexingStats implements IndexingOperationListener { } @Override - public void postDelete(Engine.Delete delete) { - if (!delete.origin().isRecovery()) { - long took = delete.endTime() - delete.startTime(); - totalStats.deleteMetric.inc(took); - totalStats.deleteCurrent.dec(); - StatsHolder typeStats = typeStats(delete.type()); - typeStats.deleteMetric.inc(took); - typeStats.deleteCurrent.dec(); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + if (result.hasFailure() == false) { + if (!delete.origin().isRecovery()) { + long took = result.getTook(); + totalStats.deleteMetric.inc(took); + totalStats.deleteCurrent.dec(); + StatsHolder typeStats = typeStats(delete.type()); + typeStats.deleteMetric.inc(took); + typeStats.deleteCurrent.dec(); + } + } else { + postDelete(delete, result.getFailure()); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java index 7b79f785ff3..d576f4d2ab5 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java @@ -36,8 +36,6 @@ import java.io.IOException; import java.util.Collection; import java.util.concurrent.atomic.AtomicBoolean; -/** - */ final class LocalShardSnapshot implements Closeable { private final IndexShard shard; private final Store store; diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index eab55f6c94a..f02109a8605 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -67,7 +67,7 @@ public final class ShadowIndexShard extends IndexShard { */ @Override public void updateRoutingEntry(ShardRouting newRouting) throws IOException { - if (newRouting.primary() == true) {// becoming a primary + if (newRouting.primary()) {// becoming a primary throw new IllegalStateException("can't promote shard to primary"); } super.updateRoutingEntry(newRouting); diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java index aa46240fd49..01b78878598 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class ShardNotFoundException extends 
ResourceNotFoundException { public ShardNotFoundException(ShardId shardId) { this(shardId, null); diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java b/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java index 407f271fd65..5f94856a9ca 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java @@ -31,8 +31,6 @@ import org.elasticsearch.gateway.MetaDataStateFormat; import java.io.IOException; import java.io.OutputStream; -/** - */ public final class ShardStateMetaData { private static final String SHARD_STATE_FILE_PREFIX = "state-"; diff --git a/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java b/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java index a72cfc48b65..c7d1938cd6d 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java +++ b/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java @@ -19,9 +19,6 @@ package org.elasticsearch.index.shard; -/** - * - */ public class SnapshotStatus { public static enum Stage { diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 5ed2280e366..13ea660b4f0 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -170,7 +170,7 @@ public class TranslogRecoveryPerformer { logger.trace("[translog] recover [delete] op of [{}][{}]", uid.type(), uid.id()); } final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.seqNo(), - delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime(), false); + delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime()); delete(engine, engineDelete); break; default: diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 51c8bcf5d7e..644caa7520b 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -27,7 +27,7 @@ public class IndexShardSnapshotStatus { /** * Snapshot stage */ - public static enum Stage { + public enum Stage { /** * Snapshot hasn't started yet */ @@ -66,7 +66,7 @@ public class IndexShardSnapshotStatus { private long indexVersion; - private boolean aborted; + private volatile boolean aborted; private String failure; diff --git a/core/src/main/java/org/elasticsearch/index/store/DirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/DirectoryService.java index 90f9ed92712..343c0a11f05 100644 --- a/core/src/main/java/org/elasticsearch/index/store/DirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/DirectoryService.java @@ -26,8 +26,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - */ public abstract class DirectoryService extends AbstractIndexShardComponent { protected DirectoryService(ShardId shardId, IndexSettings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java 
b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index 69eedd7ef19..1fffabdd152 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -46,8 +46,6 @@ import java.nio.file.Path; import java.util.HashSet; import java.util.Set; -/** - */ public class FsDirectoryService extends DirectoryService implements StoreRateLimiting.Listener, StoreRateLimiting.Provider { protected final IndexStore indexStore; diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 606510ace4b..bec3913a1ab 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -61,8 +61,8 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; @@ -84,6 +84,7 @@ import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.nio.file.AccessDeniedException; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.ArrayList; @@ -1373,8 +1374,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref for (String file : files) { try { estimatedSize += directory.fileLength(file); - } catch (NoSuchFileException | FileNotFoundException e) { - // ignore, the file is not there no more + } catch (NoSuchFileException | FileNotFoundException | AccessDeniedException e) { + // ignore, the file is no longer there; on Windows, if one thread concurrently deletes a file while + // calling Files.size, you can also sometimes hit AccessDeniedException } } return estimatedSize; diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java index 2653f01c81d..c284ad8313c 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; -/** - * - */ public class StoreFileMetaData implements Writeable { public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_5_0_0; diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreStats.java b/core/src/main/java/org/elasticsearch/index/store/StoreStats.java index d777d7b7830..5b9406f427c 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -29,8 +29,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - */ public class StoreStats implements Streamable, ToXContent { private long sizeInBytes; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java
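The Store.java hunk above widens the set of ignored exceptions when summing file lengths because Windows can surface a concurrently deleted file as AccessDeniedException rather than NoSuchFileException. A standalone sketch of the same deletion-tolerant pattern over java.nio, not the Lucene Directory API the actual code uses:

```java
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.AccessDeniedException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public final class TolerantSizeSketch {
    static long estimateSize(Path dir) throws IOException {
        long total = 0;
        try (Stream<Path> files = Files.list(dir)) {
            for (Path file : (Iterable<Path>) files::iterator) {
                try {
                    total += Files.size(file);
                } catch (NoSuchFileException | FileNotFoundException | AccessDeniedException e) {
                    // the file vanished while we were iterating; on Windows the
                    // deletion can race with the size call and show up as
                    // AccessDeniedException, so tolerate that too
                }
            }
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(estimateSize(Paths.get(".")) + " bytes");
    }
}
```

Since the result is only an estimate used for reporting, silently skipping a vanishing file is preferable to failing the whole stats call.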
index af7e3a3bd27..133457969cb 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -35,8 +35,6 @@ import java.nio.channels.FileChannel; import java.nio.file.OpenOption; import java.nio.file.Path; -/** - */ class Checkpoint { final long offset; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 0abf232fa12..dd5a633e7fa 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -860,14 +860,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - public Index(Engine.Index index) { + public Index(Engine.Index index, Engine.IndexResult indexResult) { this.id = index.id(); this.type = index.type(); this.source = index.source(); this.routing = index.routing(); this.parent = index.parent(); - this.seqNo = index.seqNo(); - this.version = index.version(); + this.seqNo = indexResult.getSeqNo(); + this.version = indexResult.getVersion(); this.timestamp = index.timestamp(); this.ttl = index.ttl(); this.versionType = index.versionType(); @@ -1039,8 +1039,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - public Delete(Engine.Delete delete) { - this(delete.uid(), delete.seqNo(), delete.version(), delete.versionType()); + public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) { + this(delete.uid(), deleteResult.getSeqNo(), deleteResult.getVersion(), delete.versionType()); } /** utility for testing */ @@ -1050,9 +1050,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public Delete(Term uid, long seqNo, long version, VersionType versionType) { this.uid = uid; + this.seqNo = seqNo; this.version = version; this.versionType = versionType; - this.seqNo = seqNo; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java index 2ebf279588e..7f693a584d9 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogException.java @@ -25,9 +25,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class TranslogException extends ElasticsearchException { public TranslogException(ShardId shardId, String msg) { diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index 263a658c08d..e60fd2086b9 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - * - */ public class TranslogStats extends ToXContentToBytes implements Streamable { private long translogSizeInBytes; diff --git a/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java b/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java index c0d929d82f5..98afd8781b4 100644 --- a/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java +++ 
b/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java @@ -19,40 +19,15 @@ package org.elasticsearch.indices; -import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.shard.IndexShard; -import java.io.IOException; - /** * Abstract base class for the an {@link IndexShard} level {@linkplain IndicesRequestCache.CacheEntity}. */ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.CacheEntity { - @FunctionalInterface - public interface Loader { - void load(StreamOutput out) throws IOException; - } - - private final Loader loader; - private boolean loadedFromCache = true; - - protected AbstractIndexShardCacheEntity(Loader loader) { - this.loader = loader; - } - - /** - * When called after passing this through - * {@link IndicesRequestCache#getOrCompute(IndicesRequestCache.CacheEntity, DirectoryReader, BytesReference)} this will return whether - * or not the result was loaded from the cache. - */ - public final boolean loadedFromCache() { - return loadedFromCache; - } /** * Get the {@linkplain ShardRequestCache} used to track cache statistics. @@ -60,27 +35,7 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach protected abstract ShardRequestCache stats(); @Override - public final IndicesRequestCache.Value loadValue() throws IOException { - /* BytesStreamOutput allows to pass the expected size but by default uses - * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. - * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful - * since we don't shrink to the actual size once we are done serializing. 
- * By passing 512 as the expected size we will resize the byte array in the stream - * slowly until we hit the page size and don't waste too much memory for small query - * results.*/ - final int expectedSizeInBytes = 512; - try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { - loader.load(out); - // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep - // the memory properly paged instead of having varied sized bytes - final BytesReference reference = out.bytes(); - loadedFromCache = false; - return new IndicesRequestCache.Value(reference, out.ramBytesUsed()); - } - } - - @Override - public final void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) { + public final void onCached(IndicesRequestCache.Key key, BytesReference value) { stats().onCached(key, value); } @@ -95,7 +50,7 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach } @Override - public final void onRemoval(RemovalNotification notification) { + public final void onRemoval(RemovalNotification notification) { stats().onRemoval(notification.getKey(), notification.getValue(), notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED); } diff --git a/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java b/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java index 52e801a4cb2..21b944c65fa 100644 --- a/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java +++ b/core/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java @@ -25,9 +25,6 @@ import org.elasticsearch.index.Index; import java.io.IOException; -/** - * - */ public class AliasFilterParsingException extends ElasticsearchException { public AliasFilterParsingException(Index index, String name, String desc, Throwable ex) { diff --git a/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java index f64addfd2f1..68c1e518650 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexAlreadyExistsException.java @@ -26,9 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class IndexAlreadyExistsException extends ElasticsearchException { public IndexAlreadyExistsException(Index index) { diff --git a/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java b/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java index 1248d73b81e..da823abf537 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexCreationException.java @@ -26,8 +26,6 @@ import org.elasticsearch.index.Index; import java.io.IOException; -/** - */ public class IndexCreationException extends ElasticsearchException implements ElasticsearchWrapperException { public IndexCreationException(String index, Throwable cause) { diff --git a/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java index 3b665e051e0..920d2ea51b7 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java @@ -25,9 +25,6 
@@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class IndexTemplateAlreadyExistsException extends ElasticsearchException { private final String name; diff --git a/core/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java b/core/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java index db3b232d0e3..83fc2623390 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexTemplateMissingException.java @@ -25,9 +25,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class IndexTemplateMissingException extends ElasticsearchException { private final String name; diff --git a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 3b4258a8bdf..11dbfb36f4f 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -189,11 +189,6 @@ public class IndexingMemoryController extends AbstractComponent implements Index statusChecker.run(); } - /** Called by IndexShard to record that this many bytes were written to translog */ - public void bytesWritten(int bytes) { - statusChecker.bytesWritten(bytes); - } - /** Asks this shard to throttle indexing to one thread */ protected void activateThrottling(IndexShard shard) { shard.activateThrottling(); @@ -205,17 +200,20 @@ public class IndexingMemoryController extends AbstractComponent implements Index } @Override - public void postIndex(Engine.Index index, boolean created) { - recordOperationBytes(index); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + recordOperationBytes(index, result); } @Override - public void postDelete(Engine.Delete delete) { - recordOperationBytes(delete); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + recordOperationBytes(delete, result); } - private void recordOperationBytes(Engine.Operation op) { - bytesWritten(op.sizeInBytes()); + /** Called by IndexShard to record estimated bytes written to translog for the operation */ + private void recordOperationBytes(Engine.Operation operation, Engine.Result result) { + if (result.hasFailure() == false) { + statusChecker.bytesWritten(operation.estimatedSizeInBytes()); + } } private static final class ShardAndBytesUsed implements Comparable { diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index 8465dfbd540..c95a8866544 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,12 +22,9 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; -import org.elasticsearch.action.update.UpdateHelper; -import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; -import org.elasticsearch.index.NodeServicesProvider;
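The IndexingMemoryController hunk above changes translog byte accounting so that bytes are recorded only when the engine result carries no failure; failed operations wrote nothing durable and should not inflate indexing-buffer pressure. A minimal, self-contained sketch of that guard, where Op and Result are simplified stand-ins for Engine.Operation and Engine.Result, not Elasticsearch types:

class TranslogByteAccounting {
    interface Op { int estimatedSizeInBytes(); }
    interface Result { boolean hasFailure(); }

    private long pendingBytes;

    // Mirrors the guard introduced above: only successful operations contribute
    // to the byte count that eventually triggers indexing throttling.
    void recordOperationBytes(Op operation, Result result) {
        if (result.hasFailure() == false) {
            pendingBytes += operation.estimatedSizeInBytes();
        }
    }

    long pendingBytes() { return pendingBytes; }
}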
import org.elasticsearch.index.mapper.AllFieldMapper; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.BooleanFieldMapper; @@ -80,16 +77,11 @@ import java.util.Map; * Configures classes and services that are shared by indices on each node. */ public class IndicesModule extends AbstractModule { - - private final Map mapperParsers; - private final Map metadataMapperParsers; - private final MapperRegistry mapperRegistry; private final List namedWritables = new ArrayList<>(); + private final MapperRegistry mapperRegistry; public IndicesModule(List mapperPlugins) { - this.mapperParsers = getMappers(mapperPlugins); - this.metadataMapperParsers = getMetadataMappers(mapperPlugins); - this.mapperRegistry = new MapperRegistry(mapperParsers, metadataMapperParsers); + this.mapperRegistry = new MapperRegistry(getMappers(mapperPlugins), getMetadataMappers(mapperPlugins)); registerBuiltinWritables(); } @@ -176,28 +168,18 @@ public class IndicesModule extends AbstractModule { @Override protected void configure() { - bindMapperExtension(); - - bind(RecoverySettings.class).asEagerSingleton(); - bind(PeerRecoveryTargetService.class).asEagerSingleton(); - bind(PeerRecoverySourceService.class).asEagerSingleton(); bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); bind(SyncedFlushService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(IndicesTTLService.class).asEagerSingleton(); - bind(UpdateHelper.class).asEagerSingleton(); - bind(MetaDataIndexUpgradeService.class).asEagerSingleton(); - bind(NodeServicesProvider.class).asEagerSingleton(); bind(GlobalCheckpointSyncAction.class).asEagerSingleton(); } - // public for testing + /** + * A registry for all field mappers. + */ public MapperRegistry getMapperRegistry() { return mapperRegistry; } - - protected void bindMapperExtension() { - bind(MapperRegistry.class).toInstance(getMapperRegistry()); - } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index ff3713a374f..0fcda5c8fd5 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -41,13 +41,12 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import java.io.Closeable; -import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; /** * The indices request cache allows caching of shard-level request stage responses, helping with improving @@ -63,7 +62,7 @@ import java.util.concurrent.TimeUnit; * is functional. */ public final class IndicesRequestCache extends AbstractComponent implements RemovalListener, Closeable { + BytesReference>, Closeable { /** * A setting to enable or disable request caching on an index level.
It's dynamic by default @@ -80,17 +79,17 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); private final ByteSizeValue size; private final TimeValue expire; - private final Cache cache; IndicesRequestCache(Settings settings) { super(settings); this.size = INDICES_CACHE_QUERY_SIZE.get(settings); this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; long sizeInBytes = size.getBytes(); - CacheBuilder cacheBuilder = CacheBuilder.builder() + CacheBuilder cacheBuilder = CacheBuilder.builder() .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); if (expire != null) { - cacheBuilder.setExpireAfterAccess(TimeUnit.MILLISECONDS.toNanos(expire.millis())); + cacheBuilder.setExpireAfterAccess(expire); } cache = cacheBuilder.build(); } @@ -106,15 +105,16 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo } @Override - public void onRemoval(RemovalNotification notification) { + public void onRemoval(RemovalNotification notification) { notification.getKey().entity.onRemoval(notification); } - BytesReference getOrCompute(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) throws Exception { + BytesReference getOrCompute(CacheEntity cacheEntity, Supplier loader, + DirectoryReader reader, BytesReference cacheKey) throws Exception { final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey); - Loader loader = new Loader(cacheEntity); - Value value = cache.computeIfAbsent(key, loader); - if (loader.isLoaded()) { + Loader cacheLoader = new Loader(cacheEntity, loader); + BytesReference value = cache.computeIfAbsent(key, cacheLoader); + if (cacheLoader.isLoaded()) { key.entity.onMiss(); // see if it's the first time we see this reader, and make sure to register a cleanup key CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion()); @@ -127,16 +127,18 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo } else { key.entity.onHit(); } - return value.reference; + return value; } - private static class Loader implements CacheLoader { + private static class Loader implements CacheLoader { private final CacheEntity entity; + private final Supplier loader; private boolean loaded; - Loader(CacheEntity entity) { + Loader(CacheEntity entity, Supplier loader) { this.entity = entity; + this.loader = loader; } public boolean isLoaded() { @@ -144,8 +146,8 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo } @Override - public Value load(Key key) throws Exception { - Value value = entity.loadValue(); + public BytesReference load(Key key) throws Exception { + BytesReference value = loader.get(); entity.onCached(key, value); loaded = true; return value; @@ -155,16 +157,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo /** * Basic interface to make this cache testable. */ - interface CacheEntity { - /** - * Loads the actual cache value. this is the heavy lifting part. - */ - Value loadValue() throws IOException; + interface CacheEntity extends Accountable { /** - * Called after the value was loaded via {@link #loadValue()} + * Called after the value was loaded. */
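The getOrCompute change above distinguishes a cache hit from a miss by asking the loader whether it actually ran (cacheLoader.isLoaded()), which drives the onMiss()/onHit() statistics. A minimal sketch of the same bookkeeping, with a plain ConcurrentHashMap standing in for the ES Cache:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class HitMissSketch {
    private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();

    String getOrCompute(String key) {
        boolean[] loaded = new boolean[1];
        String value = cache.computeIfAbsent(key, k -> {
            loaded[0] = true; // the loader ran: this call was a miss
            return "computed:" + k;
        });
        System.out.println(loaded[0] ? "miss" : "hit");
        return value;
    }

    public static void main(String[] args) {
        HitMissSketch sketch = new HitMissSketch();
        sketch.getOrCompute("query"); // prints "miss"
        sketch.getOrCompute("query"); // prints "hit"
    }
}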
- void onCached(Key key, Value value); + void onCached(Key key, BytesReference value); /** * Returns true iff the resource behind this entity is still open i.e. @@ -191,32 +189,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo /** * Called when this entity instance is removed */ - void onRemoval(RemovalNotification notification); - } - - - - static class Value implements Accountable { - final BytesReference reference; - final long ramBytesUsed; - - Value(BytesReference reference, long ramBytesUsed) { - this.reference = reference; - this.ramBytesUsed = ramBytesUsed; - } - - @Override - public long ramBytesUsed() { - return ramBytesUsed; - } - - @Override - public Collection getChildResources() { - return Collections.emptyList(); - } + void onRemoval(RemovalNotification notification); } static class Key implements Accountable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); + public final CacheEntity entity; // use as identity equality public final long readerVersion; // use the reader version to not keep a reference to a "short" lived reader until it's reaped public final BytesReference value; @@ -229,7 +207,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo @Override public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_REF + Long.BYTES + value.length(); + return BASE_RAM_BYTES_USED + entity.ramBytesUsed() + value.length(); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 577845e664e..749326287b1 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -20,13 +20,14 @@ package org.elasticsearch.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -35,6 +36,7 @@ import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -43,14 +45,17 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.FileSystemUtils;
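The reworked Key.ramBytesUsed() above replaces a hand-counted estimate with a constant derived once from the class layout, plus the members whose size actually varies. A sketch of that accounting pattern, assuming only lucene-core on the classpath (CacheKeySketch is an illustrative name, not the ES class):

import java.util.Collection;
import java.util.Collections;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;

final class CacheKeySketch implements Accountable {
    // reflection-derived shallow size (object header + fields), computed once
    private static final long BASE_RAM_BYTES_USED =
            RamUsageEstimator.shallowSizeOfInstance(CacheKeySketch.class);

    private final byte[] payload;

    CacheKeySketch(byte[] payload) {
        this.payload = payload;
    }

    @Override
    public long ramBytesUsed() {
        return BASE_RAM_BYTES_USED + payload.length; // fixed part + variable part
    }

    @Override
    public Collection<Accountable> getChildResources() {
        return Collections.emptyList();
    }
}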
+import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -59,9 +64,11 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -72,7 +79,6 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.engine.Engine; @@ -82,6 +88,7 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.merge.MergeStats; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -93,7 +100,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; -import org.elasticsearch.indices.AbstractIndexShardCacheEntity.Loader; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -103,6 +109,8 @@ import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QueryPhase; @@ -113,7 +121,6 @@ import java.io.Closeable; import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; @@ -127,9 +134,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.Predicate; 
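The IndicesRequestCache constructor earlier in this diff bounds the cache by memory weight rather than entry count, charging each entry key.ramBytesUsed() + value.ramBytesUsed() against a byte budget and evicting by weight. The same shape expressed with Guava's cache builder instead of the ES Cache API (a sketch; byte[] length stands in for ramBytesUsed()):

import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class WeightedCacheSketch {
    public static void main(String[] args) {
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                .maximumWeight(1024 * 1024)                                   // total byte budget
                .weigher((String key, byte[] value) -> key.length() + value.length) // key + value weight
                .expireAfterAccess(60, TimeUnit.SECONDS)                      // optional, like the expire setting
                .build();
        cache.put("shard-0:query-hash", new byte[100]);
        System.out.println(cache.size()); // 1
    }
}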
+import java.util.function.Supplier; import java.util.stream.Collectors; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; @@ -152,6 +162,10 @@ public class IndicesService extends AbstractLifecycleComponent private final CacheCleaner cacheCleaner; private final ThreadPool threadPool; private final CircuitBreakerService circuitBreakerService; + private final BigArrays bigArrays; + private final ScriptService scriptService; + private final ClusterService clusterService; + private final Client client; private volatile Map indices = emptyMap(); private final Map> pendingDeletes = new HashMap<>(); private final AtomicInteger numUncompletedDeletes = new AtomicInteger(); @@ -176,6 +190,7 @@ public class IndicesService extends AbstractLifecycleComponent IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, MapperRegistry mapperRegistry, NamedWriteableRegistry namedWriteableRegistry, ThreadPool threadPool, IndexScopedSettings indexScopedSettings, CircuitBreakerService circuitBreakerService, + BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService, Client client, MetaStateService metaStateService) { super(settings); this.threadPool = threadPool; @@ -197,6 +212,10 @@ public class IndicesService extends AbstractLifecycleComponent () -> Iterables.flatten(this).iterator()); this.indexScopeSetting = indexScopedSettings; this.circuitBreakerService = circuitBreakerService; + this.bigArrays = bigArrays; + this.scriptService = scriptService; + this.clusterService = clusterService; + this.client = client; this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { @Override public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { @@ -363,7 +382,7 @@ public class IndicesService extends AbstractLifecycleComponent * @throws IndexAlreadyExistsException if the index already exists. 
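Throughout IndicesService the NodeServicesProvider indirection is replaced by direct constructor dependencies (BigArrays, ScriptService, ClusterService, Client). A schematic before/after of that refactoring; the types below are illustrative stand-ins, not Elasticsearch classes:

// Before: a provider object that consumers query for each service they need.
class Big {}
class Scripts {}
class Cluster {}

class ServicesProvider {
    Big getBig() { return new Big(); }
    Scripts getScripts() { return new Scripts(); }
    Cluster getCluster() { return new Cluster(); }
}

// After: the dependencies appear in the constructor signature, so what a class
// actually uses is explicit and tests can hand it exactly those collaborators.
class ServiceWithExplicitDeps {
    private final Big big;
    private final Scripts scripts;
    private final Cluster cluster;

    ServiceWithExplicitDeps(Big big, Scripts scripts, Cluster cluster) {
        this.big = big;
        this.scripts = scripts;
        this.cluster = cluster;
    }
}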
*/ @Override - public synchronized IndexService createIndex(final NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, List builtInListeners, Consumer globalCheckpointSyncer) throws IOException { + public synchronized IndexService createIndex(IndexMetaData indexMetaData, List builtInListeners, Consumer globalCheckpointSyncer) throws IOException { ensureChangesAllowed(); if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]"); @@ -381,8 +400,15 @@ public class IndicesService extends AbstractLifecycleComponent }; finalListeners.add(onStoreClose); finalListeners.add(oldShardsStats); - final IndexService indexService = createIndexService("create index", nodeServicesProvider, indexMetaData, indicesQueryCache, - indicesFieldDataCache, finalListeners, globalCheckpointSyncer, indexingMemoryController); + final IndexService indexService = + createIndexService( + "create index", + indexMetaData, + indicesQueryCache, + indicesFieldDataCache, + finalListeners, + globalCheckpointSyncer, + indexingMemoryController); boolean success = false; try { indexService.getIndexEventListener().afterIndexCreated(indexService); @@ -399,14 +425,13 @@ public class IndicesService extends AbstractLifecycleComponent /** * This creates a new IndexService without registering it */ - private synchronized IndexService createIndexService(final String reason, final NodeServicesProvider nodeServicesProvider, + private synchronized IndexService createIndexService(final String reason, IndexMetaData indexMetaData, IndicesQueryCache indicesQueryCache, IndicesFieldDataCache indicesFieldDataCache, List builtInListeners, Consumer globalCheckpointSyncer, IndexingOperationListener... indexingOperationListeners) throws IOException { final Index index = indexMetaData.getIndex(); - final ClusterService clusterService = nodeServicesProvider.getClusterService(); final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); logger.debug("creating Index [{}], shards [{}]/[{}{}] - reason [{}]", @@ -423,7 +448,19 @@ public class IndicesService extends AbstractLifecycleComponent for (IndexEventListener listener : builtInListeners) { indexModule.addIndexEventListener(listener); } - return indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, indicesQueryCache, mapperRegistry, globalCheckpointSyncer, + return indexModule.newIndexService( + nodeEnv, + this, + circuitBreakerService, + bigArrays, + threadPool, + scriptService, + indicesQueriesRegistry, + clusterService, + client, + indicesQueryCache, + mapperRegistry, + globalCheckpointSyncer, indicesFieldDataCache); } @@ -433,7 +470,7 @@ public class IndicesService extends AbstractLifecycleComponent * This method will throw an exception if the creation or the update fails. * The created {@link IndexService} will not be registered and will be closed immediately. 
*/ - public synchronized void verifyIndexMetadata(final NodeServicesProvider nodeServicesProvider, IndexMetaData metaData, IndexMetaData metaDataUpdate) throws IOException { + public synchronized void verifyIndexMetadata(IndexMetaData metaData, IndexMetaData metaDataUpdate) throws IOException { final List closeables = new ArrayList<>(); try { IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {}); @@ -441,9 +478,8 @@ public class IndicesService extends AbstractLifecycleComponent IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings); closeables.add(indicesQueryCache); // this will also fail if some plugin fails etc. which is nice since we can verify that early - final IndexService service = createIndexService("metadata verification", nodeServicesProvider, - metaData, indicesQueryCache, indicesFieldDataCache, Collections.emptyList(), s -> { - }); + final IndexService service = + createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList(), s -> {}); closeables.add(() -> service.close("metadata verification", false)); for (ObjectCursor typeMapping : metaData.getMappings().values()) { // don't apply the default mapping, it has been applied when the mapping was created @@ -461,7 +497,7 @@ public class IndicesService extends AbstractLifecycleComponent @Override public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService, PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, - NodeServicesProvider nodeServicesProvider, Callback onShardFailure) throws IOException { + Callback onShardFailure) throws IOException { ensureChangesAllowed(); IndexService indexService = indexService(shardRouting.index()); IndexShard indexShard = indexService.createShard(shardRouting); @@ -471,7 +507,7 @@ public class IndicesService extends AbstractLifecycleComponent assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS: "mapping update consumer only required by local shards recovery"; try { - nodeServicesProvider.getClient().admin().indices().preparePutMapping() + client.admin().indices().preparePutMapping() .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid .setType(type) .setSource(mapping.source().string()) @@ -1100,7 +1136,7 @@ public class IndicesService extends AbstractLifecycleComponent } // if now in millis is used (or in the future, a more generic "isDeterministic" flag // then we can't cache based on "now" key within the search request, as it is not deterministic - if (context.nowInMillisUsed()) { + if (context.getQueryShardContext().isCachable() == false) { return false; } return true; @@ -1111,7 +1147,7 @@ public class IndicesService extends AbstractLifecycleComponent if (shard == null) { return; } - indicesRequestCache.clear(new IndexShardCacheEntity(shard, null)); + indicesRequestCache.clear(new IndexShardCacheEntity(shard)); logger.trace("{} explicit cache clear", shard.shardId()); } @@ -1123,13 +1159,19 @@ public class IndicesService extends AbstractLifecycleComponent */ public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception { assert canCache(request, context); - final IndexShardCacheEntity entity = new IndexShardCacheEntity(context.indexShard(), out -> { - queryPhase.execute(context); - context.queryResult().writeToNoId(out); 
- }); final DirectoryReader directoryReader = context.searcher().getDirectoryReader(); - final BytesReference bytesReference = indicesRequestCache.getOrCompute(entity, directoryReader, request.cacheKey()); - if (entity.loadedFromCache()) { + + boolean[] loadedFromCache = new boolean[] { true }; + BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> { + queryPhase.execute(context); + try { + context.queryResult().writeToNoId(out); + } catch (IOException e) { + throw new AssertionError("Could not serialize response", e); + } + loadedFromCache[0] = false; + }); + if (loadedFromCache[0]) { // restore the cached query result into the context final QuerySearchResult result = context.queryResult(); StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry); @@ -1155,7 +1197,11 @@ public class IndicesService extends AbstractLifecycleComponent } BytesReference cacheKey = new BytesArray("fieldstats:" + field); BytesReference statsRef = cacheShardLevelResult(shard, searcher.getDirectoryReader(), cacheKey, out -> { - out.writeOptionalWriteable(fieldType.stats(searcher.reader())); + try { + out.writeOptionalWriteable(fieldType.stats(searcher.reader())); + } catch (IOException e) { + throw new IllegalStateException("Failed to write field stats output", e); + } }); try (StreamInput in = statsRef.streamInput()) { return in.readOptionalWriteable(FieldStats::readFrom); @@ -1174,17 +1220,33 @@ public class IndicesService extends AbstractLifecycleComponent * @param loader loads the data into the cache if needed * @return the contents of the cache or the result of calling the loader */ - private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Loader loader) + private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Consumer loader) throws Exception { - IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard, loader); - return indicesRequestCache.getOrCompute(cacheEntity, reader, cacheKey); + IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard); + Supplier supplier = () -> { + /* BytesStreamOutput allows passing the expected size but by default uses + * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result, i.e. + * a date histogram with 3 buckets, is ~100 bytes so 16k might be very wasteful + * since we don't shrink to the actual size once we are done serializing. + * By passing 512 as the expected size we will resize the byte array in the stream + * slowly until we hit the page size and don't waste too much memory for small query + * results.*/ + final int expectedSizeInBytes = 512; + try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { + loader.accept(out); + // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep + // the memory properly paged instead of having variable-sized bytes + return out.bytes(); + } + }; + return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey); } static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class); private final IndexShard indexShard; - protected IndexShardCacheEntity(IndexShard indexShard, Loader loader) { - super(loader); + protected IndexShardCacheEntity(IndexShard indexShard) { this.indexShard = indexShard; } @@ -1202,6 +1264,13 @@ public class IndicesService extends AbstractLifecycleComponent public Object getCacheIdentity() { return indexShard; } + + @Override + public long ramBytesUsed() { + // No need to take the IndexShard into account since it is shared + // across many entities + return BASE_RAM_BYTES_USED; + } } @FunctionalInterface @@ -1213,4 +1282,12 @@ public class IndicesService extends AbstractLifecycleComponent (Index index, IndexSettings indexSettings) -> canDeleteIndexContents(index, indexSettings); private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true; + public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { + Function factory = + (parser) -> new QueryParseContext(indicesQueriesRegistry, parser, new ParseFieldMatcher(settings)); + String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions); + IndexMetaData indexMetaData = state.metaData().index(index); + return new AliasFilter(ShardSearchRequest.parseAliasFilter(factory, indexMetaData, aliases), aliases); + } + } diff --git a/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java b/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java index 4e2c443ff4a..8786b206477 100644 --- a/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java +++ b/core/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java @@ -26,9 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class InvalidAliasNameException extends ElasticsearchException { public InvalidAliasNameException(Index index, String name, String desc) { @@ -36,6 +33,10 @@ public class InvalidAliasNameException extends ElasticsearchException { setIndex(index); } + public InvalidAliasNameException(String name, String description) { + super("Invalid alias name [{}]: {}", name, description); + } + public InvalidAliasNameException(StreamInput in) throws IOException{ super(in); } diff --git a/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java b/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java index 34dd327c91a..3770398864f 100644 --- a/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java +++ b/core/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java @@ -26,9 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException;
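The supplier installed by cacheShardLevelResult above starts its BytesStreamOutput at 512 bytes instead of the 16k page default, trading a few array resizes for far less waste on small cached results. The same trade-off sketched with java.io, where ByteArrayOutputStream stands in for BytesStreamOutput:

import java.io.ByteArrayOutputStream;

public class ExpectedSizeSketch {
    static final int EXPECTED_SIZE_IN_BYTES = 512;   // optimistic initial capacity
    static final int PAGE_SIZE_IN_BYTES = 16 * 1024; // stand-in for BigArrays.PAGE_SIZE_IN_BYTES

    static byte[] serialize(byte[] result) {
        ByteArrayOutputStream out = new ByteArrayOutputStream(EXPECTED_SIZE_IN_BYTES);
        out.write(result, 0, result.length); // the buffer grows automatically only if needed
        return out.toByteArray();
    }

    public static void main(String[] args) {
        byte[] tiny = new byte[100]; // e.g. a three-bucket date histogram
        System.out.println(serialize(tiny).length + " bytes held, not " + PAGE_SIZE_IN_BYTES);
    }
}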
-/** - * - */ public class InvalidIndexNameException extends ElasticsearchException { public InvalidIndexNameException(String name, String desc) { diff --git a/core/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java b/core/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java index e3ed97415ef..38b5889aea8 100644 --- a/core/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java +++ b/core/src/main/java/org/elasticsearch/indices/InvalidIndexTemplateException.java @@ -25,9 +25,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class InvalidIndexTemplateException extends ElasticsearchException { private final String name; diff --git a/core/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java b/core/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java index 2279860d80c..05030b52ba4 100644 --- a/core/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java +++ b/core/src/main/java/org/elasticsearch/indices/InvalidTypeNameException.java @@ -25,9 +25,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class InvalidTypeNameException extends MapperException { public InvalidTypeNameException(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index 6c251d3bf1c..9133ca81e28 100644 --- a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -188,10 +188,11 @@ public class NodeIndicesStats implements Streamable, ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - String level = params.param("level", "node"); - boolean isLevelValid = "node".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level); + final String level = params.param("level", "node"); + final boolean isLevelValid = + "indices".equalsIgnoreCase(level) || "node".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level); if (!isLevelValid) { - return builder; + throw new IllegalArgumentException("level parameter must be one of [indices] or [node] or [shards] but was [" + level + "]"); } // "node" level diff --git a/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java index 5ab9744933b..d7a4820db3f 100644 --- a/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java +++ b/core/src/main/java/org/elasticsearch/indices/TypeMissingException.java @@ -27,9 +27,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.Arrays; -/** - * - */ public class TypeMissingException extends ElasticsearchException { public TypeMissingException(Index index, String... 
types) { diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 23ef9bdcd3f..6c4a3cc2578 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -68,9 +68,6 @@ import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import java.util.Locale; -/** - * - */ public enum PreBuiltAnalyzers { STANDARD(CachingStrategy.ELASTICSEARCH) { diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java index ddda8a08745..823152e6d9e 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java @@ -24,9 +24,6 @@ import org.elasticsearch.Version; import java.util.HashMap; import java.util.Map; -/** - * - */ public class PreBuiltCacheFactory { /** diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java index dcf6c295303..063763006a0 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java @@ -26,9 +26,6 @@ import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import java.io.Reader; import java.util.Locale; -/** - * - */ public enum PreBuiltCharFilters { HTML_STRIP(CachingStrategy.ONE) { diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java index a31f60fc5bd..e1189e3197d 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java @@ -72,9 +72,6 @@ import org.tartarus.snowball.ext.FrenchStemmer; import java.util.Locale; -/** - * - */ public enum PreBuiltTokenFilters { WORD_DELIMITER(CachingStrategy.ONE) { diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java index 424b5e4534f..ce623953030 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java @@ -38,9 +38,6 @@ import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import java.util.Locale; -/** - * - */ public enum PreBuiltTokenizers { STANDARD(CachingStrategy.LUCENE) { diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 471d9737e16..0ee93db4852 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import 
org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.Type; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; @@ -41,7 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; @@ -54,7 +52,6 @@ import org.elasticsearch.index.IndexComponent; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.seqno.GlobalCheckpointService; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.shard.IndexEventListener; @@ -73,6 +70,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.search.SearchService; import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -97,7 +95,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final PeerRecoveryTargetService recoveryTargetService; private final ShardStateAction shardStateAction; private final NodeMappingRefreshAction nodeMappingRefreshAction; - private final NodeServicesProvider nodeServicesProvider; private final Consumer globalCheckpointSyncer; private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() { @@ -121,12 +118,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple NodeMappingRefreshAction nodeMappingRefreshAction, RepositoriesService repositoriesService, RestoreService restoreService, SearchService searchService, SyncedFlushService syncedFlushService, - PeerRecoverySourceService peerRecoverySourceService, NodeServicesProvider nodeServicesProvider, + PeerRecoverySourceService peerRecoverySourceService, SnapshotShardsService snapshotShardsService, GlobalCheckpointSyncAction globalCheckpointSyncAction) { - this(settings, indicesService, + this(settings, (AllocatedIndices>) indicesService, clusterService, threadPool, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, repositoriesService, restoreService, searchService, syncedFlushService, peerRecoverySourceService, - nodeServicesProvider, globalCheckpointSyncAction::updateCheckpointForShard); + snapshotShardsService, globalCheckpointSyncAction::updateCheckpointForShard); } // for tests @@ -138,10 +135,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple NodeMappingRefreshAction nodeMappingRefreshAction, RepositoriesService repositoriesService, RestoreService restoreService, SearchService searchService, SyncedFlushService syncedFlushService, - PeerRecoverySourceService peerRecoverySourceService, NodeServicesProvider nodeServicesProvider, + PeerRecoverySourceService peerRecoverySourceService, SnapshotShardsService snapshotShardsService, Consumer globalCheckpointSyncer) { super(settings); - 
this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, syncedFlushService); + this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, syncedFlushService, + snapshotShardsService); this.indicesService = indicesService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -151,18 +149,22 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple this.restoreService = restoreService; this.repositoriesService = repositoriesService; this.sendRefreshMapping = this.settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); - this.nodeServicesProvider = nodeServicesProvider; this.globalCheckpointSyncer = globalCheckpointSyncer; } @Override protected void doStart() { - clusterService.addFirst(this); + // Doesn't make sense to manage shards on non-master and non-data nodes + if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { + clusterService.addFirst(this); + } } @Override protected void doStop() { - clusterService.remove(this); + if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { + clusterService.remove(this); + } } @Override @@ -195,7 +197,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple failMissingShards(state); - removeShards(state); + removeShards(state); // removes any local shards that doesn't match what the master expects updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache @@ -379,11 +381,21 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple ShardRouting currentRoutingEntry = shard.routingEntry(); ShardId shardId = currentRoutingEntry.shardId(); ShardRouting newShardRouting = localRoutingNode == null ? null : localRoutingNode.getByShardId(shardId); - if (newShardRouting == null || newShardRouting.isSameAllocation(currentRoutingEntry) == false) { + if (newShardRouting == null) { // we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore // once all shards are allocated logger.debug("{} removing shard (not allocated)", shardId); indexService.removeShard(shardId.id(), "removing shard (not allocated)"); + } else if (newShardRouting.isSameAllocation(currentRoutingEntry) == false) { + logger.debug("{} removing shard (stale allocation id, stale {}, new {})", shardId, + currentRoutingEntry, newShardRouting); + indexService.removeShard(shardId.id(), "removing shard (stale copy)"); + } else if (newShardRouting.initializing() && currentRoutingEntry.active()) { + // this can happen if the node was isolated/gc-ed, rejoins the cluster and a new shard with the same allocation id + // is assigned to it. Batch cluster state processing or if shard fetching completes before the node gets a new cluster + // state may result in a new shard being initialized while having the same allocation id as the currently started shard. + logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); + indexService.removeShard(shardId.id(), "removing shard (stale copy)"); } else { // remove shards where recovery source has changed. 
This re-initializes shards later in createOrUpdateShards if (newShardRouting.recoverySource() != null && newShardRouting.recoverySource().getType() == Type.PEER) { @@ -427,8 +439,7 @@ AllocatedIndex indexService = null; try { - indexService = - indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener, globalCheckpointSyncer); + indexService = indicesService.createIndex(indexMetaData, buildInIndexListener, globalCheckpointSyncer); if (indexService.updateMapping(indexMetaData) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(), @@ -527,7 +538,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple logger.debug("{} creating shard", shardRouting.shardId()); RecoveryState recoveryState = new RecoveryState(shardRouting, nodes.getLocalNode(), sourceNode); indicesService.createShard(shardRouting, recoveryState, recoveryTargetService, new RecoveryListener(shardRouting), - repositoriesService, nodeServicesProvider, failedShardHandler); + repositoriesService, failedShardHandler); } catch (IndexShardAlreadyExistsException e) { // ignore this, the method call can happen several times logger.debug("Trying to create shard that already exists", e); @@ -577,8 +588,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple /** * Finds the routing source node for peer recovery, return null if it's not found. Note, this method expects the shard - * routing to *require* peer recovery, use {@link ShardRouting#recoverySource()} to - * check if its needed or not. + * routing to *require* peer recovery, use {@link ShardRouting#recoverySource()} to check if it's needed or not. */ private static DiscoveryNode findSourceNodeForPeerRecovery(Logger logger, RoutingTable routingTable, DiscoveryNodes nodes, ShardRouting shardRouting) { @@ -617,29 +627,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple @Override public void onRecoveryDone(RecoveryState state) { - if (state.getRecoverySource().getType() == Type.SNAPSHOT) { - SnapshotRecoverySource snapshotRecoverySource = (SnapshotRecoverySource) state.getRecoverySource(); - restoreService.indexShardRestoreCompleted(snapshotRecoverySource.snapshot(), shardRouting.shardId()); - } shardStateAction.shardStarted(shardRouting, "after " + state.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); } @Override public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - if (state.getRecoverySource().getType() == Type.SNAPSHOT) { - try { - if (Lucene.isCorruptionException(e.getCause())) { - SnapshotRecoverySource snapshotRecoverySource = (SnapshotRecoverySource) state.getRecoverySource(); - restoreService.failRestore(snapshotRecoverySource.snapshot(), shardRouting.shardId()); - } - } catch (Exception inner) { - e.addSuppressed(inner); - } finally { - handleRecoveryFailure(shardRouting, sendShardFailure, e); - } - } else { - handleRecoveryFailure(shardRouting, sendShardFailure, e); - } + handleRecoveryFailure(shardRouting, sendShardFailure, e); } } @@ -774,13 +767,16 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
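The removeShards changes in the hunks above split shard removal into three cases: no routing from the master at all, a different allocation id, and a shard the master re-initialized under the same allocation id after the node was isolated or GC-ed. A compact sketch of just that branching, with a simplified Routing type in place of ShardRouting:

class RemoveShardDecisionSketch {
    static class Routing {
        final String allocationId;
        final boolean initializing;
        final boolean active;
        Routing(String allocationId, boolean initializing, boolean active) {
            this.allocationId = allocationId;
            this.initializing = initializing;
            this.active = active;
        }
        boolean isSameAllocation(Routing other) {
            return allocationId.equals(other.allocationId);
        }
    }

    /** Returns the removal reason the real code logs, or null to keep the shard. */
    static String removalReason(Routing current, Routing fromMaster) {
        if (fromMaster == null) {
            return "removing shard (not allocated)";
        } else if (fromMaster.isSameAllocation(current) == false) {
            return "removing shard (stale copy)"; // stale allocation id
        } else if (fromMaster.initializing && current.active) {
            // same allocation id, but the master re-initialized the shard while the
            // local copy still thinks it is started (isolated/GC-ed node rejoining)
            return "removing shard (stale copy)";
        }
        return null; // keep the shard
    }
}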
/** * Creates a new {@link IndexService} for the given metadata. - * @param indexMetaData the index metadata to create the index for - * @param builtInIndexListener a list of built-in lifecycle {@link IndexEventListener} that should should be used along side with - * the per-index listeners + * + * @param indexMetaData the index metadata to create the index for + * @param builtInIndexListener a list of built-in lifecycle {@link IndexEventListener} that should be used alongside + * the per-index listeners + * @param globalCheckpointSyncer the global checkpoint syncer * @throws IndexAlreadyExistsException if the index already exists. */ - U createIndex(NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, - List builtInIndexListener, Consumer globalCheckpointSyncer) throws IOException; + U createIndex(IndexMetaData indexMetaData, + List builtInIndexListener, + Consumer globalCheckpointSyncer) throws IOException; /** * Verify that the contents on disk for the given index are deleted; if not, delete the contents. @@ -827,7 +823,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple */ T createShard(ShardRouting shardRouting, RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService, PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, - NodeServicesProvider nodeServicesProvider, Callback onShardFailure) throws IOException; + Callback onShardFailure) throws IOException; /** * Returns shard for the specified id if it exists otherwise returns null. diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index e5624681912..e5f8b531f67 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -31,9 +31,9 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -91,7 +91,6 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde private final RecoveriesCollection onGoingRecoveries; - @Inject public PeerRecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService) { super(settings); @@ -401,7 +400,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde private void waitForClusterState(long clusterStateVersion) { ClusterStateObserver observer = new ClusterStateObserver(clusterService, TimeValue.timeValueMinutes(5), logger, threadPool.getThreadContext()); - final ClusterState clusterState = observer.observedState(); + final ClusterState clusterState = observer.observedState().getClusterState(); if (clusterState.getVersion() >= clusterStateVersion) { logger.trace("node has cluster state with version higher than {} (current: {})", clusterStateVersion, clusterState.getVersion()); @@ -428,20 +427,20 @@ public
class PeerRecoveryTargetService extends AbstractComponent implements Inde }, new ClusterStateObserver.ValidationPredicate() { @Override - protected boolean validate(ClusterState newState) { - return newState.getVersion() >= clusterStateVersion; + protected boolean validate(ClusterServiceState newState) { + return newState.getClusterState().getVersion() >= clusterStateVersion; } }); try { future.get(); logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, - observer.observedState().getVersion()); + observer.observedState().getClusterState().getVersion()); } catch (Exception e) { logger.debug( (Supplier) () -> new ParameterizedMessage( "failed waiting for cluster state with version {} (current: {})", clusterStateVersion, - observer.observedState()), + observer.observedState().getClusterState().getVersion()), e); throw ExceptionsHelper.convertToRuntime(e); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java index d2e07bd9e4c..c47b28f8d56 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java @@ -29,9 +29,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.Objects; -/** - * - */ public class RecoverFilesRecoveryException extends ElasticsearchException implements ElasticsearchWrapperException { private final int numberOfFiles; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java index c040c25736a..4216ea049b4 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -27,9 +27,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public class RecoveryCleanFilesRequest extends TransportRequest { private long recoveryId; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java index 3c3d96a4f9b..9b7f31c7e55 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFailedException.java @@ -27,9 +27,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ public class RecoveryFailedException extends ElasticsearchException { public RecoveryFailedException(StartRecoveryRequest request, Throwable cause) { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java index 49cdb737ed3..196e2cdf76b 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java @@ -30,9 +30,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public final class RecoveryFileChunkRequest extends TransportRequest { private boolean lastChunk; private long recoveryId; diff --git 
a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java index d9a2a19f49f..2143127c234 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java @@ -28,9 +28,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -/** - * - */ public class RecoveryFilesInfoRequest extends TransportRequest { private long recoveryId; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index e8d5c0f299f..dca50f3f816 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -26,9 +26,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public class RecoveryFinalizeRecoveryRequest extends TransportRequest { private long recoveryId; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index 171102d07ea..94425f62799 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -27,9 +27,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { private long maxUnsafeAutoIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java index 7ec59a76ed8..9018f6f0be1 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java @@ -27,9 +27,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -/** - * - */ class RecoveryResponse extends TransportResponse { List phase1FileNames = new ArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 6c4e484a2d5..d4ddccd8742 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -22,7 +22,6 @@ package org.elasticsearch.indices.recovery; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -85,7 +84,6 @@ public class RecoverySettings extends AbstractComponent { private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; - @Inject public 
RecoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); @@ -142,7 +140,7 @@ public class RecoverySettings extends AbstractComponent { public ByteSizeValue getChunkSize() { return chunkSize; } - void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests + public void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests if (chunkSize.bytesAsInt() <= 0) { throw new IllegalArgumentException("chunkSize must be > 0"); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java index 5cc294ace6e..46494626920 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -28,9 +28,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import java.util.List; -/** - * - */ public class RecoveryTranslogOperationsRequest extends TransportRequest { private long recoveryId; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 9aa56fd8cb0..bc8a73b5622 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -29,9 +29,6 @@ import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -/** - * - */ public class StartRecoveryRequest extends TransportRequest { private long recoveryId; diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 439806b454b..f360af7c2f7 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -66,9 +67,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -/** - * - */ public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a separate public service @@ -94,12 +92,17 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe this.threadPool = threadPool; transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest::new, ThreadPool.Names.SAME, new ShardActiveRequestHandler()); this.deleteShardTimeout = INDICES_STORE_DELETE_SHARD_TIMEOUT.get(settings); - clusterService.addLast(this); + // Doesn't make sense to delete shards on non-data nodes + if (DiscoveryNode.isDataNode(settings)) { + clusterService.add(this); + } } @Override public void close() { - clusterService.remove(this); + if 
(DiscoveryNode.isDataNode(settings)) { + clusterService.remove(this); + } } @Override @@ -332,7 +335,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe } }, new ClusterStateObserver.ValidationPredicate() { @Override - protected boolean validate(ClusterState newState) { + protected boolean validate(ClusterServiceState newState) { // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified // or the shard is active in which case we want to send back that the shard is active // here we could also evaluate the cluster state and get the information from there. we diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 341b0e57858..707df5feb1e 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -57,9 +57,6 @@ import java.util.Iterator; import java.util.List; import java.util.concurrent.TimeUnit; -/** - * - */ public class TransportNodesListShardStoreMetaData extends TransportNodesAction shardsToPurge) { for (IndexShard shardToPurge : shardsToPurge) { - Query query = shardToPurge.mapperService().fullName(TTLFieldMapper.NAME).rangeQuery(null, System.currentTimeMillis(), false, true); + Query query = shardToPurge.mapperService().fullName(TTLFieldMapper.NAME).rangeQuery(null, System.currentTimeMillis(), false, + true, null); Engine.Searcher searcher = shardToPurge.acquireSearcher("indices_ttl"); try { logger.debug("[{}][{}] purging shard", shardToPurge.routingEntry().index(), shardToPurge.routingEntry().id()); diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index 9ad369e22d4..40f401ac6b4 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -116,7 +116,7 @@ public final class IngestMetadata implements MetaData.Custom { @Override public EnumSet context() { - return MetaData.API_AND_GATEWAY; + return MetaData.ALL_CONTEXTS; } @Override diff --git a/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java b/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java index 677be3b4b0c..6bb410c78ea 100644 --- a/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java +++ b/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import java.util.Collections; import java.util.Map; @@ -42,7 +43,7 @@ public class InternalTemplateService implements TemplateService { int mustacheStart = template.indexOf("{{"); int mustacheEnd = template.indexOf("}}"); if (mustacheStart != -1 && mustacheEnd != -1 && mustacheStart < mustacheEnd) { - Script script = new Script(template, ScriptService.ScriptType.INLINE, "mustache", Collections.emptyMap()); + Script script = new Script(ScriptType.INLINE, "mustache", template, Collections.emptyMap()); CompiledScript compiledScript = scriptService.compile( script, 
ScriptContext.Standard.INGEST, diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index e7146636534..6c701e59c90 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -68,7 +68,7 @@ public class PipelineExecutionService implements ClusterStateListener { }); } - public void executeBulkRequest(Iterable> actionRequests, + public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { @@ -80,7 +80,7 @@ public class PipelineExecutionService implements ClusterStateListener { @Override protected void doRun() throws Exception { - for (ActionRequest actionRequest : actionRequests) { + for (DocWriteRequest actionRequest : actionRequests) { if ((actionRequest instanceof IndexRequest)) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java b/core/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java index 02f665a3464..1d4234d689e 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/DeadlockAnalyzer.java @@ -32,9 +32,6 @@ import java.util.Set; import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableSet; -/** - * - */ public class DeadlockAnalyzer { private static final Deadlock NULL_RESULT[] = new Deadlock[0]; diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java b/core/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java index 488e7a7274f..9f6a2d3f2e7 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java @@ -19,8 +19,6 @@ package org.elasticsearch.monitor.jvm; -/** - */ public class GcNames { public static final String YOUNG = "young"; diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/core/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 181b94f13db..1714d00abb2 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -36,8 +36,6 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; -/** - */ public class HotThreads { private static final Object mutex = new Object(); diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java index f0d71db6992..f9d9fd80be0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -/** - * - */ public 
class JvmService extends AbstractComponent { private final JvmInfo jvmInfo; diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 08abfc05f1d..8d66ba41b30 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -19,17 +19,26 @@ package org.elasticsearch.monitor.os; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.monitor.Probes; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.OperatingSystemMXBean; +import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; public class OsProbe { @@ -108,50 +117,317 @@ public class OsProbe { } /** - * Returns the system load averages + * The system load averages as an array. + * + * On Windows, this method returns {@code null}. + * + * On Linux, this method returns the 1, 5, and 15-minute load averages. + * + * On macOS, this method should return the 1-minute load average. + * + * @return the available system load averages or {@code null} */ - public double[] getSystemLoadAverage() { - if (Constants.LINUX || Constants.FREE_BSD) { - final String procLoadAvg = Constants.LINUX ? "/proc/loadavg" : "/compat/linux/proc/loadavg"; - double[] loadAverage = readProcLoadavg(procLoadAvg); - if (loadAverage != null) { - return loadAverage; - } - // fallback - } + final double[] getSystemLoadAverage() { if (Constants.WINDOWS) { return null; - } - if (getSystemLoadAverage == null) { - return null; - } - try { - double oneMinuteLoadAverage = (double) getSystemLoadAverage.invoke(osMxBean); - return new double[] { oneMinuteLoadAverage >= 0 ? oneMinuteLoadAverage : -1, -1, -1 }; - } catch (Exception e) { - return null; + } else if (Constants.LINUX) { + try { + final String procLoadAvg = readProcLoadavg(); + assert procLoadAvg.matches("(\\d+\\.\\d+\\s+){3}\\d+/\\d+\\s+\\d+"); + final String[] fields = procLoadAvg.split("\\s+"); + return new double[]{Double.parseDouble(fields[0]), Double.parseDouble(fields[1]), Double.parseDouble(fields[2])}; + } catch (final IOException e) { + if (logger.isDebugEnabled()) { + logger.debug("error reading /proc/loadavg", e); + } + return null; + } + } else { + assert Constants.MAC_OS_X; + if (getSystemLoadAverage == null) { + return null; + } + try { + final double oneMinuteLoadAverage = (double) getSystemLoadAverage.invoke(osMxBean); + return new double[]{oneMinuteLoadAverage >= 0 ? 
oneMinuteLoadAverage : -1, -1, -1}; + } catch (IllegalAccessException | InvocationTargetException e) { + if (logger.isDebugEnabled()) { + logger.debug("error reading one minute load average from operating system", e); + } + return null; + } } } - @SuppressForbidden(reason = "access /proc") - private static double[] readProcLoadavg(String procLoadavg) { - try { - List lines = Files.readAllLines(PathUtils.get(procLoadavg)); - if (!lines.isEmpty()) { - String[] fields = lines.get(0).split("\\s+"); - return new double[] { Double.parseDouble(fields[0]), Double.parseDouble(fields[1]), Double.parseDouble(fields[2]) }; - } - } catch (IOException e) { - // do not fail Elasticsearch if something unexpected - // happens here - } - return null; + /** + * The line from {@code /proc/loadavg}. The first three fields are the load averages averaged over 1, 5, and 15 minutes. The fourth + * field is two numbers separated by a slash, the first is the number of currently runnable scheduling entities, the second is the + * number of scheduling entities on the system. The fifth field is the PID of the most recently created process. + * + * @return the line from {@code /proc/loadavg} or {@code null} + */ + @SuppressForbidden(reason = "access /proc/loadavg") + String readProcLoadavg() throws IOException { + return readSingleLine(PathUtils.get("/proc/loadavg")); } public short getSystemCpuPercent() { return Probes.getLoadAndScaleToPercent(getSystemCpuLoad, osMxBean); } + /** + * Reads a file containing a single line. + * + * @param path path to the file to read + * @return the single line + * @throws IOException if an I/O exception occurs reading the file + */ + private String readSingleLine(final Path path) throws IOException { + final List lines = Files.readAllLines(path); + assert lines != null && lines.size() == 1; + return lines.get(0); + } + + // pattern for lines in /proc/self/cgroup + private static final Pattern CONTROL_GROUP_PATTERN = Pattern.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)"); + + /** + * A map of the control groups to which the Elasticsearch process belongs. Note that this is a map because the control groups can vary + * from subsystem to subsystem. Additionally, this map can not be cached because a running process can be reclassified. + * + * @return a map from subsystems to the control group for the Elasticsearch process. + * @throws IOException if an I/O exception occurs reading {@code /proc/self/cgroup} + */ + private Map getControlGroups() throws IOException { + final List lines = readProcSelfCgroup(); + final Map controllerMap = new HashMap<>(); + for (final String line : lines) { + final Matcher matcher = CONTROL_GROUP_PATTERN.matcher(line); + // note that Matcher#matches must be invoked as + // matching is lazy; this can not happen in an assert + // as assertions might not be enabled + final boolean matches = matcher.matches(); + assert matches : line; + // at this point we have captured the subsystems and the + // control group + final String[] controllers = matcher.group(1).split(","); + for (final String controller : controllers) { + controllerMap.put(controller, matcher.group(2)); + } + } + return controllerMap; + } + + /** + * The lines from {@code /proc/self/cgroup}. This file represents the control groups to which the Elasticsearch process belongs. Each + * line in this file represents a control group hierarchy of the form + *

+     * <pre>
+     * {@code \d+:([^:,]+(?:,[^:,]+)?):(/.*)}
+     * </pre>
    + * with the first field representing the hierarchy ID, the second field representing a comma-separated list of the subsystems bound to + * the hierarchy, and the last field representing the control group. + * + * @return the lines from {@code /proc/self/cgroup} + * @throws IOException if an I/O exception occurs reading {@code /proc/self/cgroup} + */ + @SuppressForbidden(reason = "access /proc/self/cgroup") + List readProcSelfCgroup() throws IOException { + final List lines = Files.readAllLines(PathUtils.get("/proc/self/cgroup")); + assert lines != null && !lines.isEmpty(); + return lines; + } + + /** + * The total CPU time in nanoseconds consumed by all tasks in the cgroup to which the Elasticsearch process belongs for the {@code + * cpuacct} subsystem. + * + * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem + * @return the total CPU time in nanoseconds + * @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group + */ + private long getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException { + return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup)); + } + + /** + * Returns the line from {@code cpuacct.usage} for the control group to which the Elasticsearch process belongs for the {@code cpuacct} + * subsystem. This line represents the total CPU time in nanoseconds consumed by all tasks in the same control group. + * + * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpuacct} subsystem + * @return the line from {@code cpuacct.usage} + * @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group + */ + @SuppressForbidden(reason = "access /sys/fs/cgroup/cpuacct") + String readSysFsCgroupCpuAcctCpuAcctUsage(final String controlGroup) throws IOException { + return readSingleLine(PathUtils.get("/sys/fs/cgroup/cpuacct", controlGroup, "cpuacct.usage")); + } + + /** + * The total period of time in microseconds for how frequently the Elasticsearch control group's access to CPU resources will be + * reallocated. + * + * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem + * @return the CFS quota period in microseconds + * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_period_us} for the control group + */ + private long getCgroupCpuAcctCpuCfsPeriodMicros(final String controlGroup) throws IOException { + return Long.parseLong(readSysFsCgroupCpuAcctCpuCfsPeriod(controlGroup)); + } + + /** + * Returns the line from {@code cpu.cfs_period_us} for the control group to which the Elasticsearch process belongs for the {@code cpu} + * subsystem. This line represents the period of time in microseconds for how frequently the control group's access to CPU resources + * will be reallocated. 
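Editor's note: the cgroup plumbing in this patch hinges on the `\d+:([^:,]+(?:,[^:,]+)?):(/.*)` pattern above. The following standalone sketch (the sample line and class name are illustrative, not part of the patch) shows how a `/proc/self/cgroup` entry decomposes into the subsystem-to-control-group map, and how the CFS quota and period read by the neighboring helpers relate to an effective CPU limit.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative only: mirrors what OsProbe#getControlGroups does, on a canned line.
public class CgroupParseSketch {
    private static final Pattern CONTROL_GROUP_PATTERN =
            Pattern.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)");

    public static void main(String[] args) {
        // hierarchy-ID : comma-separated subsystems : control group
        final String line = "4:cpu,cpuacct:/system.slice/docker.service";
        final Matcher matcher = CONTROL_GROUP_PATTERN.matcher(line);
        if (matcher.matches()) {
            final Map<String, String> controllerMap = new HashMap<>();
            // group(1) holds the subsystems, group(2) the control group they map to
            for (final String controller : matcher.group(1).split(",")) {
                controllerMap.put(controller, matcher.group(2));
            }
            // prints {cpu=/system.slice/docker.service, cpuacct=/system.slice/docker.service}
            System.out.println(controllerMap);
        }
        // With the control group known, cpu.cfs_quota_us / cpu.cfs_period_us gives the
        // effective CPU allowance, e.g. 50000 / 100000 -> 0.5 CPUs (quota -1 = unbounded).
    }
}
```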
+ * + * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem + * @return the line from {@code cpu.cfs_period_us} + * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_period_us} for the control group + */ + @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu") + String readSysFsCgroupCpuAcctCpuCfsPeriod(final String controlGroup) throws IOException { + return readSingleLine(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.cfs_period_us")); + } + + /** + * The total time in microseconds that all tasks in the Elasticsearch control group can run during one period as specified by {@code + * cpu.cfs_period_us}. + * + * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem + * @return the CFS quota in microseconds + * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_quota_us} for the control group + */ + private long getCgroupCpuAcctCpuCfsQuotaMicros(final String controlGroup) throws IOException { + return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctCfsQuota(controlGroup)); + } + + /** + * Returns the line from {@code cpu.cfs_quota_us} for the control group to which the Elasticsearch process belongs for the {@code cpu} + * subsystem. This line represents the total time in microseconds that all tasks in the control group can run during one period as + * specified by {@code cpu.cfs_period_us}. + * + * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem + * @return the line from {@code cpu.cfs_quota_us} + * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_quota_us} for the control group + */ + @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu") + String readSysFsCgroupCpuAcctCpuAcctCfsQuota(final String controlGroup) throws IOException { + return readSingleLine(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.cfs_quota_us")); + } + + /** + * The CPU time statistics for all tasks in the Elasticsearch control group. + * + * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem + * @return the CPU time statistics + * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group + */ + private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup) throws IOException { + final List lines = readSysFsCgroupCpuAcctCpuStat(controlGroup); + long numberOfPeriods = -1; + long numberOfTimesThrottled = -1; + long timeThrottledNanos = -1; + for (final String line : lines) { + final String[] fields = line.split("\\s+"); + switch (fields[0]) { + case "nr_periods": + numberOfPeriods = Long.parseLong(fields[1]); + break; + case "nr_throttled": + numberOfTimesThrottled = Long.parseLong(fields[1]); + break; + case "throttled_time": + timeThrottledNanos = Long.parseLong(fields[1]); + break; + } + } + assert numberOfPeriods != -1; + assert numberOfTimesThrottled != -1; + assert timeThrottledNanos != -1; + return new OsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos); + } + + /** + * Returns the lines from {@code cpu.stat} for the control group to which the Elasticsearch process belongs for the {@code cpu} + * subsystem. These lines represent the CPU time statistics and have the form + *

+     * <pre>
+     * nr_periods \d+
+     * nr_throttled \d+
+     * throttled_time \d+
+     * </pre>
    + * where {@code nr_periods} is the number of period intervals as specified by {@code cpu.cfs_period_us} that have elapsed, {@code + * nr_throttled} is the number of times tasks in the given control group have been throttled, and {@code throttled_time} is the total + * time in nanoseconds for which tasks in the given control group have been throttled. + * + * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem + * @return the lines from {@code cpu.stat} + * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group + */ + @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu") + List readSysFsCgroupCpuAcctCpuStat(final String controlGroup) throws IOException { + final List lines = Files.readAllLines(PathUtils.get("/sys/fs/cgroup/cpu", controlGroup, "cpu.stat")); + assert lines != null && lines.size() == 3; + return lines; + } + + /** + * Checks if cgroup stats are available by checking for the existence of {@code /proc/self/cgroup}, {@code /sys/fs/cgroup/cpu}, and + * {@code /sys/fs/cgroup/cpuacct}. + * + * @return {@code true} if the stats are available, otherwise {@code false} + */ + @SuppressForbidden(reason = "access /proc/self/cgroup, /sys/fs/cgroup/cpu, and /sys/fs/cgroup/cpuacct") + protected boolean areCgroupStatsAvailable() { + if (!Files.exists(PathUtils.get("/proc/self/cgroup"))) { + return false; + } + if (!Files.exists(PathUtils.get("/sys/fs/cgroup/cpu"))) { + return false; + } + if (!Files.exists(PathUtils.get("/sys/fs/cgroup/cpuacct"))) { + return false; + } + return true; + } + + /** + * Basic cgroup stats. + * + * @return basic cgroup stats, or {@code null} if an I/O exception occurred reading the cgroup stats + */ + private OsStats.Cgroup getCgroup() { + try { + if (!areCgroupStatsAvailable()) { + return null; + } else { + final Map controllerMap = getControlGroups(); + assert !controllerMap.isEmpty(); + + final String cpuAcctControlGroup = controllerMap.get("cpuacct"); + assert cpuAcctControlGroup != null; + final long cgroupCpuAcctUsageNanos = getCgroupCpuAcctUsageNanos(cpuAcctControlGroup); + + final String cpuControlGroup = controllerMap.get("cpu"); + assert cpuControlGroup != null; + final long cgroupCpuAcctCpuCfsPeriodMicros = getCgroupCpuAcctCpuCfsPeriodMicros(cpuControlGroup); + final long cgroupCpuAcctCpuCfsQuotaMicros = getCgroupCpuAcctCpuCfsQuotaMicros(cpuControlGroup); + final OsStats.Cgroup.CpuStat cpuStat = getCgroupCpuAcctCpuStat(cpuControlGroup); + + return new OsStats.Cgroup( + cpuAcctControlGroup, + cgroupCpuAcctUsageNanos, + cpuControlGroup, + cgroupCpuAcctCpuCfsPeriodMicros, + cgroupCpuAcctCpuCfsQuotaMicros, + cpuStat); + } + } catch (final IOException e) { + if (logger.isDebugEnabled()) { + logger.debug("error reading control group stats", e); + } + return null; + } + } + private static class OsProbeHolder { private static final OsProbe INSTANCE = new OsProbe(); } @@ -160,24 +436,27 @@ public class OsProbe { return OsProbeHolder.INSTANCE; } - private OsProbe() { + OsProbe() { + } + private final Logger logger = ESLoggerFactory.getLogger(getClass()); + public OsInfo osInfo(long refreshInterval, int allocatedProcessors) { return new OsInfo(refreshInterval, Runtime.getRuntime().availableProcessors(), allocatedProcessors, Constants.OS_NAME, Constants.OS_ARCH, Constants.OS_VERSION); } public OsStats osStats() { - OsStats.Cpu cpu = new OsStats.Cpu(getSystemCpuPercent(), getSystemLoadAverage()); - OsStats.Mem mem = new OsStats.Mem(getTotalPhysicalMemorySize(), 
getFreePhysicalMemorySize()); - OsStats.Swap swap = new OsStats.Swap(getTotalSwapSpaceSize(), getFreeSwapSpaceSize()); - return new OsStats(System.currentTimeMillis(), cpu, mem , swap); + final OsStats.Cpu cpu = new OsStats.Cpu(getSystemCpuPercent(), getSystemLoadAverage()); + final OsStats.Mem mem = new OsStats.Mem(getTotalPhysicalMemorySize(), getFreePhysicalMemorySize()); + final OsStats.Swap swap = new OsStats.Swap(getTotalSwapSpaceSize(), getFreeSwapSpaceSize()); + final OsStats.Cgroup cgroup = Constants.LINUX ? getCgroup() : null; + return new OsStats(System.currentTimeMillis(), cpu, mem, swap, cgroup); } /** - * Returns a given method of the OperatingSystemMXBean, - * or null if the method is not found or unavailable. + * Returns a given method of the OperatingSystemMXBean, or null if the method is not found or unavailable. */ private static Method getMethod(String methodName) { try { @@ -187,4 +466,5 @@ public class OsProbe { return null; } } + } diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index cb67eef852c..f37daddbb06 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -41,7 +41,7 @@ public class OsService extends AbstractComponent { super(settings); this.probe = OsProbe.getInstance(); TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); - this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.boundedNumberOfProcessors(settings)); + this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.numberOfProcessors(settings)); this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats()); logger.debug("using refresh_interval [{}]", refreshInterval); } diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java index e07b92a6cb4..aec443280d0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -19,6 +19,7 @@ package org.elasticsearch.monitor.os; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -31,17 +32,19 @@ import java.util.Arrays; import java.util.Objects; public class OsStats implements Writeable, ToXContent { - + public static final Version V_5_1_0 = Version.fromId(5010099); private final long timestamp; private final Cpu cpu; private final Mem mem; private final Swap swap; + private final Cgroup cgroup; - public OsStats(long timestamp, Cpu cpu, Mem mem, Swap swap) { + public OsStats(final long timestamp, final Cpu cpu, final Mem mem, final Swap swap, final Cgroup cgroup) { this.timestamp = timestamp; - this.cpu = Objects.requireNonNull(cpu, "cpu must not be null"); - this.mem = Objects.requireNonNull(mem, "mem must not be null");; - this.swap = Objects.requireNonNull(swap, "swap must not be null");; + this.cpu = Objects.requireNonNull(cpu); + this.mem = Objects.requireNonNull(mem); + this.swap = Objects.requireNonNull(swap); + this.cgroup = cgroup; } public OsStats(StreamInput in) throws IOException { @@ -49,6 +52,11 @@ public class OsStats implements Writeable, ToXContent { this.cpu = new Cpu(in); this.mem = new Mem(in); this.swap = new Swap(in); + if (in.getVersion().onOrAfter(V_5_1_0)) { + this.cgroup = 
in.readOptionalWriteable(Cgroup::new); + } else { + this.cgroup = null; + } } @Override @@ -57,6 +65,9 @@ public class OsStats implements Writeable, ToXContent { cpu.writeTo(out); mem.writeTo(out); swap.writeTo(out); + if (out.getVersion().onOrAfter(V_5_1_0)) { + out.writeOptionalWriteable(cgroup); + } } public long getTimestamp() { @@ -73,6 +84,10 @@ public class OsStats implements Writeable, ToXContent { return swap; } + public Cgroup getCgroup() { + return cgroup; + } + static final class Fields { static final String OS = "os"; static final String TIMESTAMP = "timestamp"; @@ -103,6 +118,9 @@ public class OsStats implements Writeable, ToXContent { cpu.toXContent(builder, params); mem.toXContent(builder, params); swap.toXContent(builder, params); + if (cgroup != null) { + cgroup.toXContent(builder, params); + } builder.endObject(); return builder; } @@ -265,7 +283,211 @@ public class OsStats implements Writeable, ToXContent { } } + /** + * Encapsulates basic cgroup statistics. + */ + public static class Cgroup implements Writeable, ToXContent { + + private final String cpuAcctControlGroup; + private final long cpuAcctUsageNanos; + private final String cpuControlGroup; + private final long cpuCfsPeriodMicros; + private final long cpuCfsQuotaMicros; + private final CpuStat cpuStat; + + /** + * The control group for the {@code cpuacct} subsystem. + * + * @return the control group + */ + public String getCpuAcctControlGroup() { + return cpuAcctControlGroup; + } + + /** + * The total CPU time consumed by all tasks in the + * {@code cpuacct} control group from + * {@link Cgroup#cpuAcctControlGroup}. + * + * @return the total CPU time in nanoseconds + */ + public long getCpuAcctUsageNanos() { + return cpuAcctUsageNanos; + } + + /** + * The control group for the {@code cpu} subsystem. + * + * @return the control group + */ + public String getCpuControlGroup() { + return cpuControlGroup; + } + + /** + * The period of time for how frequently the control group from + * {@link Cgroup#cpuControlGroup} has its access to CPU + * resources reallocated. + * + * @return the period of time in microseconds + */ + public long getCpuCfsPeriodMicros() { + return cpuCfsPeriodMicros; + } + + /** + * The total amount of time for which all tasks in the control + * group from {@link Cgroup#cpuControlGroup} can run in one + * period as represented by {@link Cgroup#cpuCfsPeriodMicros}. + * + * @return the total amount of time in microseconds + */ + public long getCpuCfsQuotaMicros() { + return cpuCfsQuotaMicros; + } + + /** + * The CPU time statistics. See {@link CpuStat}. + * + * @return the CPU time statistics. 
+ */ + public CpuStat getCpuStat() { + return cpuStat; + } + + public Cgroup( + final String cpuAcctControlGroup, + final long cpuAcctUsageNanos, + final String cpuControlGroup, + final long cpuCfsPeriodMicros, + final long cpuCfsQuotaMicros, + final CpuStat cpuStat) { + this.cpuAcctControlGroup = Objects.requireNonNull(cpuAcctControlGroup); + this.cpuAcctUsageNanos = cpuAcctUsageNanos; + this.cpuControlGroup = Objects.requireNonNull(cpuControlGroup); + this.cpuCfsPeriodMicros = cpuCfsPeriodMicros; + this.cpuCfsQuotaMicros = cpuCfsQuotaMicros; + this.cpuStat = Objects.requireNonNull(cpuStat); + } + + Cgroup(final StreamInput in) throws IOException { + cpuAcctControlGroup = in.readString(); + cpuAcctUsageNanos = in.readLong(); + cpuControlGroup = in.readString(); + cpuCfsPeriodMicros = in.readLong(); + cpuCfsQuotaMicros = in.readLong(); + cpuStat = new CpuStat(in); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(cpuAcctControlGroup); + out.writeLong(cpuAcctUsageNanos); + out.writeString(cpuControlGroup); + out.writeLong(cpuCfsPeriodMicros); + out.writeLong(cpuCfsQuotaMicros); + cpuStat.writeTo(out); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject("cgroup"); + { + builder.startObject("cpuacct"); + { + builder.field("control_group", cpuAcctControlGroup); + builder.field("usage_nanos", cpuAcctUsageNanos); + } + builder.endObject(); + builder.startObject("cpu"); + { + builder.field("control_group", cpuControlGroup); + builder.field("cfs_period_micros", cpuCfsPeriodMicros); + builder.field("cfs_quota_micros", cpuCfsQuotaMicros); + cpuStat.toXContent(builder, params); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + /** + * Encapsulates CPU time statistics. + */ + public static class CpuStat implements Writeable, ToXContent { + + private final long numberOfElapsedPeriods; + private final long numberOfTimesThrottled; + private final long timeThrottledNanos; + + /** + * The number of elapsed periods. + * + * @return the number of elapsed periods as measured by + * {@code cpu.cfs_period_us} + */ + public long getNumberOfElapsedPeriods() { + return numberOfElapsedPeriods; + } + + /** + * The number of times tasks in the control group have been + * throttled. + * + * @return the number of times + */ + public long getNumberOfTimesThrottled() { + return numberOfTimesThrottled; + } + + /** + * The total time duration for which tasks in the control + * group have been throttled. 
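Editor's note: because `cgroup` is a new, nullable field, the stream handling in this patch gates it on the remote node's version. Below is a minimal sketch of that wire-compatibility pattern, assuming the real `StreamInput`/`StreamOutput` APIs; the `Payload` and `Example` classes are hypothetical stand-ins, not code from the patch.

```java
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical nested payload standing in for OsStats.Cgroup.
class Payload implements Writeable {
    final long value;

    Payload(long value) { this.value = value; }

    Payload(StreamInput in) throws IOException { value = in.readLong(); }

    @Override
    public void writeTo(StreamOutput out) throws IOException { out.writeLong(value); }
}

// Hypothetical holder demonstrating the version-gated optional-field pattern.
class Example implements Writeable {
    static final Version V_5_1_0 = Version.fromId(5010099); // same id the patch uses
    final Payload payload; // nullable: older senders never include it

    Example(StreamInput in) throws IOException {
        // pre-5.1.0 senders did not write the field, so do not try to read it
        payload = in.getVersion().onOrAfter(V_5_1_0) ? in.readOptionalWriteable(Payload::new) : null;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(V_5_1_0)) {
            out.writeOptionalWriteable(payload); // presence byte first, so null is safe
        }
    }
}
```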
+ * + * @return the total time in nanoseconds + */ + public long getTimeThrottledNanos() { + return timeThrottledNanos; + } + + public CpuStat(final long numberOfElapsedPeriods, final long numberOfTimesThrottled, final long timeThrottledNanos) { + this.numberOfElapsedPeriods = numberOfElapsedPeriods; + this.numberOfTimesThrottled = numberOfTimesThrottled; + this.timeThrottledNanos = timeThrottledNanos; + } + + CpuStat(final StreamInput in) throws IOException { + numberOfElapsedPeriods = in.readLong(); + numberOfTimesThrottled = in.readLong(); + timeThrottledNanos = in.readLong(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeLong(numberOfElapsedPeriods); + out.writeLong(numberOfTimesThrottled); + out.writeLong(timeThrottledNanos); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("stat"); + { + builder.field("number_of_elapsed_periods", numberOfElapsedPeriods); + builder.field("number_of_times_throttled", numberOfTimesThrottled); + builder.field("time_throttled_nanos", timeThrottledNanos); + } + builder.endObject(); + return builder; + } + + } + + } + public static short calculatePercentage(long used, long max) { return max <= 0 ? 0 : (short) (Math.round((100d * used) / max)); } + } diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index 99593003b34..1cae0602ff2 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; -/** - * - */ public final class ProcessService extends AbstractComponent { private final ProcessProbe probe; diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 2d03a057b58..b449d8da746 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -32,6 +32,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.GenericAction; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterModule; @@ -41,6 +42,7 @@ import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -74,6 +76,11 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.NoneDiscovery; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastZenPing; +import 
org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayAllocator; @@ -91,6 +98,9 @@ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.indices.recovery.PeerRecoverySourceService; +import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.ingest.IngestService; @@ -145,16 +155,22 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; +import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; + /** * A node represent a node within a cluster (cluster.name). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. @@ -314,7 +330,8 @@ public class Node implements Closeable { final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); clusterService.add(scriptModule.getScriptService()); resourcesToClose.add(clusterService); - final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(), classpathPlugins); + final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(), + s -> newTribeClientNode(s, classpathPlugins)); resourcesToClose.add(tribeService); final IngestService ingestService = new IngestService(settings, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); @@ -326,7 +343,6 @@ public class Node implements Closeable { } final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); modules.add(new NodeModule(this, monitorService)); - modules.add(new DiscoveryModule(this.settings)); ClusterModule clusterModule = new ClusterModule(settings, clusterService, pluginsService.filterPlugins(ClusterPlugin.class)); modules.add(clusterModule); @@ -339,7 +355,6 @@ public class Node implements Closeable { modules.add(actionModule); modules.add(new GatewayModule()); modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class))); - pluginsService.processModules(modules); CircuitBreakerService circuitBreakerService = createCircuitBreakerService(settingsModule.getSettings(), settingsModule.getClusterSettings()); resourcesToClose.add(circuitBreakerService); @@ -355,12 +370,13 @@ public class Node implements Closeable { 
.flatMap(Function.identity()).collect(Collectors.toList()); final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment); + client = new NodeClient(settings, threadPool); final IndicesService indicesService = new IndicesService(settings, pluginsService, nodeEnvironment, settingsModule.getClusterSettings(), analysisModule.getAnalysisRegistry(), searchModule.getQueryParserRegistry(), clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry, - threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, metaStateService); + threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, scriptModule.getScriptService(), + clusterService, client, metaStateService); - client = new NodeClient(settings, threadPool); Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptModule.getScriptService(), searchModule.getSearchRequestParsers()).stream()) @@ -374,7 +390,7 @@ public class Node implements Closeable { final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders); final Transport transport = networkModule.getTransportSupplier().get(); final TransportService transportService = newTransportService(settings, transport, threadPool, - networkModule.getTransportInterceptor()); + networkModule.getTransportInterceptor(), settingsModule.getClusterSettings()); final Consumer httpBind; if (networkModule.isHttpEnabled()) { HttpServerTransport httpServerTransport = networkModule.getHttpServerTransportSupplier().get(); @@ -389,6 +405,11 @@ public class Node implements Closeable { b.bind(HttpServer.class).toProvider(Providers.of(null)); }; } + + final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, + networkService, clusterService, hostsProvider -> newZenPing(settings, threadPool, transportService, hostsProvider), + pluginsService.filterPlugins(DiscoveryPlugin.class)); + pluginsService.processModules(modules); modules.add(b -> { b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry()); b.bind(SearchRequestParsers.class).toInstance(searchModule.getSearchRequestParsers()); @@ -416,6 +437,19 @@ public class Node implements Closeable { b.bind(TransportService.class).toInstance(transportService); b.bind(NetworkService.class).toInstance(networkService); b.bind(AllocationCommandRegistry.class).toInstance(NetworkModule.getAllocationCommandRegistry()); + b.bind(UpdateHelper.class).toInstance(new UpdateHelper(settings, scriptModule.getScriptService())); + b.bind(MetaDataIndexUpgradeService.class).toInstance(new MetaDataIndexUpgradeService(settings, + indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings())); + b.bind(Discovery.class).toInstance(discoveryModule.getDiscovery()); + b.bind(ZenPing.class).toInstance(discoveryModule.getZenPing()); + { + RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + processRecoverySettings(settingsModule.getClusterSettings(), recoverySettings); + b.bind(PeerRecoverySourceService.class).toInstance(new PeerRecoverySourceService(settings, transportService, + indicesService, recoverySettings, clusterService)); + b.bind(PeerRecoveryTargetService.class).toInstance(new 
PeerRecoveryTargetService(settings, threadPool, + transportService, recoverySettings, clusterService)); + } httpBind.accept(b); pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p)); } @@ -430,7 +464,7 @@ public class Node implements Closeable { resourcesToClose.addAll(pluginLifecycleComponents); this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); - client.intialize(injector.getInstance(new Key>() {})); + client.initialize(injector.getInstance(new Key>() {})); logger.info("initialized"); @@ -458,8 +492,12 @@ public class Node implements Closeable { } protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool, - TransportInterceptor interceptor) { - return new TransportService(settings, transport, threadPool, interceptor); + TransportInterceptor interceptor, ClusterSettings clusterSettings) { + return new TransportService(settings, transport, threadPool, interceptor, clusterSettings); + } + + protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { + // Noop in production, overridden by tests } /** @@ -561,7 +599,7 @@ public class Node implements Closeable { if (DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis() > 0) { final ThreadPool thread = injector.getInstance(ThreadPool.class); ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, thread.getThreadContext()); - if (observer.observedState().nodes().getMasterNodeId() == null) { + if (observer.observedState().getClusterState().nodes().getMasterNodeId() == null) { final CountDownLatch latch = new CountDownLatch(1); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override @@ -860,4 +898,15 @@ public class Node implements Closeable { } return customNameResolvers; } + + /** Create a new ZenPing instance for use in zen discovery. */ + protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, + UnicastHostsProvider hostsProvider) { + return new UnicastZenPing(settings, threadPool, transportService, hostsProvider); + } + + /** Constructs an internal node used as a client into a cluster fronted by this tribe node. 
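Editor's note: the new protected hooks on `Node` (`newZenPing`, `newTribeClientNode`, `processRecoverySettings`) exist so tests can substitute controlled implementations without touching production wiring. A hedged sketch of how a test subclass might use them; `TestNode` is an assumed name, and the constructor signature follows the `new Node(new Environment(settings), classpathPlugins)` call visible in the patch.

```java
import java.util.Collection;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenPing;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

// Assumed test-only subclass; not part of the patch.
class TestNode extends Node {
    TestNode(Environment environment, Collection<Class<? extends Plugin>> classpathPlugins) {
        super(environment, classpathPlugins);
    }

    @Override
    protected ZenPing newZenPing(Settings settings, ThreadPool threadPool,
                                 TransportService transportService, UnicastHostsProvider hostsProvider) {
        // a test could return a scripted ZenPing here; delegating keeps this sketch honest
        return super.newZenPing(settings, threadPool, transportService, hostsProvider);
    }
}
```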
*/ + protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins) { + return new Node(new Environment(settings), classpathPlugins); + } } diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index dba7f303130..8fb86ebdac0 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -41,9 +41,6 @@ import java.util.function.Predicate; import static org.elasticsearch.common.Strings.cleanPath; -/** - * - */ public class InternalSettingsPreparer { private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json"}; diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index 39e151c886f..cf1f70590c5 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -44,8 +44,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -/** - */ public class NodeService extends AbstractComponent implements Closeable { private final ThreadPool threadPool; diff --git a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index f6174c08d12..adb8bfcc388 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -19,8 +19,18 @@ package org.elasticsearch.plugins; +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; /** * An additional extension point for {@link Plugin}s that extends Elasticsearch's discovery functionality. To add an additional @@ -36,6 +46,24 @@ import org.elasticsearch.common.settings.Settings; * } */ public interface DiscoveryPlugin { + + /** + * Returns custom discovery implementations added by this plugin. + * + * The key of the returned map is the name of the discovery implementation + * (see {@link org.elasticsearch.discovery.DiscoveryModule#DISCOVERY_TYPE_SETTING}, and + * the value is a supplier to construct the {@link Discovery}. + * + * @param threadPool Use to schedule ping actions + * @param transportService Use to communicate with other nodes + * @param clusterService Use to find current nodes in the cluster + * @param zenPing Use to ping other nodes with zen unicast host list + */ + default Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ZenPing zenPing) { + return Collections.emptyMap(); + } + /** * Override to add additional {@link NetworkService.CustomNameResolver}s. 
* This can be handy if you want to provide your own Network interface name like _mycard_ @@ -52,4 +80,20 @@ public interface DiscoveryPlugin { default NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { return null; } + + /** + * Returns providers of unicast host lists for zen discovery. + * + * The key of the returned map is the name of the host provider + * (see {@link org.elasticsearch.discovery.DiscoveryModule#DISCOVERY_HOSTS_PROVIDER_SETTING}), and + * the value is a supplier to construct the host provider when it is selected for use. + * + * @param transportService Use to form the {@link org.elasticsearch.common.transport.TransportAddress} portion + * of a {@link org.elasticsearch.cluster.node.DiscoveryNode} + * @param networkService Use to find the publish host address of the current node + */ + default Map> getZenHostsProviders(TransportService transportService, + NetworkService networkService) { + return Collections.emptyMap(); + } } diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index ac21256acaa..43af643854e 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -142,29 +142,31 @@ class InstallPluginCommand extends SettingCommand { private final OptionSpec batchOption; private final OptionSpec arguments; - - static final Set DIR_AND_EXECUTABLE_PERMS; - static final Set FILE_PERMS; + static final Set BIN_DIR_PERMS; + static final Set BIN_FILES_PERMS; + static final Set CONFIG_DIR_PERMS; + static final Set CONFIG_FILES_PERMS; + static final Set PLUGIN_DIR_PERMS; + static final Set PLUGIN_FILES_PERMS; static { - Set dirAndExecutablePerms = new HashSet<>(7); - // Directories and executables get chmod 755 - dirAndExecutablePerms.add(PosixFilePermission.OWNER_EXECUTE); - dirAndExecutablePerms.add(PosixFilePermission.OWNER_READ); - dirAndExecutablePerms.add(PosixFilePermission.OWNER_WRITE); - dirAndExecutablePerms.add(PosixFilePermission.GROUP_EXECUTE); - dirAndExecutablePerms.add(PosixFilePermission.GROUP_READ); - dirAndExecutablePerms.add(PosixFilePermission.OTHERS_READ); - dirAndExecutablePerms.add(PosixFilePermission.OTHERS_EXECUTE); - DIR_AND_EXECUTABLE_PERMS = Collections.unmodifiableSet(dirAndExecutablePerms); + // Bin directory get chmod 755 + BIN_DIR_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rwxr-xr-x")); - Set filePerms = new HashSet<>(4); - // Files get chmod 644 - filePerms.add(PosixFilePermission.OWNER_READ); - filePerms.add(PosixFilePermission.OWNER_WRITE); - filePerms.add(PosixFilePermission.GROUP_READ); - filePerms.add(PosixFilePermission.OTHERS_READ); - FILE_PERMS = Collections.unmodifiableSet(filePerms); + // Bin files also get chmod 755 + BIN_FILES_PERMS = BIN_DIR_PERMS; + + // Config directory get chmod 750 + CONFIG_DIR_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rwxr-x---")); + + // Config files get chmod 660 + CONFIG_FILES_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rw-rw----")); + + // Plugin directory get chmod 755 + PLUGIN_DIR_PERMS = BIN_DIR_PERMS; + + // Plugins files get chmod 644 + PLUGIN_FILES_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rw-r--r--")); } InstallPluginCommand() { @@ -269,6 +271,7 @@ class InstallPluginCommand extends SettingCommand { URL url = new URL(urlString); Path zip = Files.createTempFile(tmpDir, 
null, ".zip"); URLConnection urlConnection = url.openConnection(); + urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer"); int contentLength = urlConnection.getContentLength(); try (InputStream in = new TerminalProgressInputStream(urlConnection.getInputStream(), contentLength, terminal)) { // must overwrite since creating the temp file above actually created the file @@ -386,7 +389,7 @@ class InstallPluginCommand extends SettingCommand { private Path stagingDirectory(Path pluginsDir) throws IOException { try { - return Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(DIR_AND_EXECUTABLE_PERMS)); + return Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(PLUGIN_DIR_PERMS)); } catch (IllegalArgumentException e) { // Jimfs throws an IAE where it should throw an UOE // remove when google/jimfs#30 is integrated into Jimfs @@ -415,7 +418,7 @@ class InstallPluginCommand extends SettingCommand { PluginInfo info = PluginInfo.readFromProperties(pluginRoot); terminal.println(VERBOSE, info.toString()); - // don't let luser install plugin as a module... + // don't let user install plugin as a module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { throw new UserException( @@ -493,9 +496,9 @@ class InstallPluginCommand extends SettingCommand { try (DirectoryStream stream = Files.newDirectoryStream(destination)) { for (Path pluginFile : stream) { if (Files.isDirectory(pluginFile)) { - setFileAttributes(pluginFile, DIR_AND_EXECUTABLE_PERMS); + setFileAttributes(pluginFile, PLUGIN_DIR_PERMS); } else { - setFileAttributes(pluginFile, FILE_PERMS); + setFileAttributes(pluginFile, PLUGIN_FILES_PERMS); } } } @@ -517,7 +520,7 @@ class InstallPluginCommand extends SettingCommand { throw new UserException(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); } Files.createDirectory(destBinDir); - setFileAttributes(destBinDir, DIR_AND_EXECUTABLE_PERMS); + setFileAttributes(destBinDir, BIN_DIR_PERMS); try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { for (Path srcFile : stream) { @@ -529,7 +532,7 @@ class InstallPluginCommand extends SettingCommand { Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); Files.copy(srcFile, destFile); - setFileAttributes(destFile, DIR_AND_EXECUTABLE_PERMS); + setFileAttributes(destFile, BIN_FILES_PERMS); } } IOUtils.rm(tmpBinDir); // clean up what we just copied @@ -545,7 +548,7 @@ class InstallPluginCommand extends SettingCommand { } Files.createDirectories(destConfigDir); - setFileAttributes(destConfigDir, DIR_AND_EXECUTABLE_PERMS); + setFileAttributes(destConfigDir, CONFIG_DIR_PERMS); final PosixFileAttributeView destConfigDirAttributesView = Files.getFileAttributeView(destConfigDir.getParent(), PosixFileAttributeView.class); final PosixFileAttributes destConfigDirAttributes = @@ -563,7 +566,7 @@ class InstallPluginCommand extends SettingCommand { Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); if (Files.exists(destFile) == false) { Files.copy(srcFile, destFile); - setFileAttributes(destFile, FILE_PERMS); + setFileAttributes(destFile, CONFIG_FILES_PERMS); if (destConfigDirAttributes != null) { setOwnerGroup(destFile, destConfigDirAttributes); } diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index 
diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index ee81261c080..bd2f853bac0 100644 --- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -60,8 +60,8 @@ class ListPluginsCommand extends SettingCommand { } Collections.sort(plugins); for (final Path plugin : plugins) { + terminal.println(plugin.getFileName().toString()); PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath())); - terminal.println(plugin.getFileName().toString() + "@" + info.getVersion()); terminal.println(Terminal.Verbosity.VERBOSE, info.toString()); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java b/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java index ee7187c8853..70bac7fac49 100644 --- a/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java @@ -42,7 +42,7 @@ public interface NetworkPlugin { * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing * transport (inter-node) requests. This must not return null */ - default List<TransportInterceptor> getTransportInterceptors() { + default List<TransportInterceptor> getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry) { return Collections.emptyList(); } diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java index 1e39edc6341..c2f5128a314 100644 --- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.index.IndexModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.script.ScriptModule; @@ -206,7 +207,7 @@ public abstract class Plugin { public final void onModule(ActionModule module) {} /** - * Old-style action extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading + * Old-style search extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading * from 2.x. * * @deprecated implement {@link SearchPlugin} instead */ @@ -215,11 +216,20 @@ public abstract class Plugin { public final void onModule(SearchModule module) {} /** - * Old-style action extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading + * Old-style network extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading * from 2.x. * * @deprecated implement {@link NetworkPlugin} instead */ @Deprecated public final void onModule(NetworkModule module) {} + + /** + * Old-style discovery extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading + * from 2.x.
+ * + * @deprecated implement {@link DiscoveryPlugin} instead + */ + @Deprecated + public final void onModule(DiscoveryModule module) {} } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 03139e565ed..d14890c7d17 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -66,9 +66,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; -/** - * - */ public class PluginsService extends AbstractComponent { /** diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 54cd34d6742..cf21f4cc830 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -19,6 +19,19 @@ package org.elasticsearch.plugins; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SettingCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.internal.InternalSettingsPreparer; + import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -26,18 +39,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.SettingCommand; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** @@ -67,7 +68,7 @@ final class RemovePluginCommand extends SettingCommand { final Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { throw new UserException( - ExitCodes.USAGE, + ExitCodes.CONFIG, "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins"); } diff --git a/core/src/main/java/org/elasticsearch/repositories/VerificationFailure.java b/core/src/main/java/org/elasticsearch/repositories/VerificationFailure.java index b9cc46e850c..14eb27ec70e 100644 --- a/core/src/main/java/org/elasticsearch/repositories/VerificationFailure.java +++ b/core/src/main/java/org/elasticsearch/repositories/VerificationFailure.java @@ -25,8 +25,6 @@ import org.elasticsearch.common.io.stream.Streamable; import java.io.IOException; -/** - */ public class VerificationFailure implements Streamable { private String nodeId; diff --git a/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index f146267c9be..4b3505e97e3 100644 --- a/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -40,12 +40,20 @@ public 
abstract class AbstractRestChannel implements RestChannel { protected final RestRequest request; protected final boolean detailedErrorsEnabled; + private final String format; + private final String filterPath; + private final boolean pretty; + private final boolean human; private BytesStreamOutput bytesOut; protected AbstractRestChannel(RestRequest request, boolean detailedErrorsEnabled) { this.request = request; this.detailedErrorsEnabled = detailedErrorsEnabled; + this.format = request.param("format", request.header("Accept")); + this.filterPath = request.param("filter_path", null); + this.pretty = request.paramAsBoolean("pretty", false); + this.human = request.paramAsBoolean("human", false); } @Override @@ -61,7 +69,7 @@ public abstract class AbstractRestChannel implements RestChannel { @Override public XContentBuilder newBuilder(@Nullable BytesReference autoDetectSource, boolean useFiltering) throws IOException { - XContentType contentType = XContentType.fromMediaTypeOrFormat(request.param("format", request.header("Accept"))); + XContentType contentType = XContentType.fromMediaTypeOrFormat(format); if (contentType == null) { // try and guess it from the auto detect source if (autoDetectSource != null) { @@ -76,17 +84,17 @@ public abstract class AbstractRestChannel implements RestChannel { Set<String> includes = Collections.emptySet(); Set<String> excludes = Collections.emptySet(); if (useFiltering) { - Set<String> filters = Strings.splitStringByCommaToSet(request.param("filter_path", null)); + Set<String> filters = Strings.splitStringByCommaToSet(filterPath); includes = filters.stream().filter(INCLUDE_FILTER).collect(toSet()); excludes = filters.stream().filter(EXCLUDE_FILTER).map(f -> f.substring(1)).collect(toSet()); } XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(contentType), bytesOutput(), includes, excludes); - if (request.paramAsBoolean("pretty", false)) { + if (pretty) { builder.prettyPrint().lfAtEnd(); } - builder.humanReadable(request.paramAsBoolean("human", builder.humanReadable())); + builder.humanReadable(human); return builder; } diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 31e09d6706e..3bb6c6773b9 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,13 +19,28 @@ package org.elasticsearch.rest; +import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ActionPlugin; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + /** * Base handler for REST requests. *

@@ -35,6 +50,7 @@ import org.elasticsearch.plugins.ActionPlugin; * {@link ActionPlugin#getRestHeaders()}. */ public abstract class BaseRestHandler extends AbstractComponent implements RestHandler { + public static final Setting<Boolean> MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting("rest.action.multi.allow_explicit_index", true, Property.NodeScope); protected final ParseFieldMatcher parseFieldMatcher; @@ -43,4 +59,101 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH super(settings); this.parseFieldMatcher = new ParseFieldMatcher(settings); } + + @Override + public final void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + // prepare the request for execution; has the side effect of touching the request parameters + final RestChannelConsumer action = prepareRequest(request, client); + + // validate unconsumed params, but we must exclude params used to format the response + // use a sorted set so the unconsumed parameters appear in a reliable sorted order + final SortedSet<String> unconsumedParams = + request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toCollection(TreeSet::new)); + + // validate the non-response params + if (unconsumedParams.isEmpty() == false) { + String message = String.format( + Locale.ROOT, + "request [%s] contains unrecognized parameter%s: ", + request.path(), + unconsumedParams.size() > 1 ? "s" : ""); + boolean first = true; + for (final String unconsumedParam : unconsumedParams) { + final LevensteinDistance ld = new LevensteinDistance(); + final List<Tuple<Float, String>> scoredParams = new ArrayList<>(); + final Set<String> candidateParams = new HashSet<>(); + candidateParams.addAll(request.consumedParams()); + candidateParams.addAll(responseParams()); + for (final String candidateParam : candidateParams) { + final float distance = ld.getDistance(unconsumedParam, candidateParam); + if (distance > 0.5f) { + scoredParams.add(new Tuple<>(distance, candidateParam)); + } + } + CollectionUtil.timSort(scoredParams, (a, b) -> { + // sort by distance in reverse order, then parameter name for equal distances + int compare = a.v1().compareTo(b.v1()); + if (compare != 0) return -compare; + else return a.v2().compareTo(b.v2()); + }); + if (first == false) { + message += ", "; + } + message += "[" + unconsumedParam + "]"; + final List<String> keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList()); + if (keys.isEmpty() == false) { + message += " -> did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]": "any of " + keys.toString()) + "?"; + } + first = false; + } + + throw new IllegalArgumentException(message); + } + + // execute the action + action.accept(channel); + } + + /** + * REST requests are handled by preparing a channel consumer that represents the execution of + * the request against a channel. + */ + @FunctionalInterface + protected interface RestChannelConsumer { + /** + * Executes a request against the given channel. + * + * @param channel the channel for sending the response + * @throws Exception if an exception occurred executing the request + */ + void accept(RestChannel channel) throws Exception; + }
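As an aside on the "did you mean" loop above: it relies on Lucene's LevensteinDistance, whose getDistance returns a similarity score in [0, 1] rather than an edit distance, which is why candidates are kept when the score exceeds 0.5f and then sorted in descending order. A minimal sketch of that scoring behaviour (assuming only the Lucene spellchecking classes on the classpath; the example strings are invented):

import org.apache.lucene.search.spell.LevensteinDistance;

public class DidYouMeanScores {
    public static void main(String[] args) {
        LevensteinDistance ld = new LevensteinDistance();
        // identical strings score 1.0
        System.out.println(ld.getDistance("local", "local"));
        // one edit apart: well above the 0.5 cutoff, so it would be suggested
        System.out.println(ld.getDistance("metric", "metrics"));
        // unrelated strings score near 0.0 and are dropped
        System.out.println(ld.getDistance("metric", "pretty"));
    }
}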
+ + /** + * Prepare the request for execution. Implementations should consume all request params before + * returning the runnable for actual execution. Unconsumed params will immediately terminate + * execution of the request. However, some params are only used in processing the response; + * implementations can override {@link BaseRestHandler#responseParams()} to indicate such + * params. + * + * @param request the request to execute + * @param client client for executing actions on the local node + * @return the action to execute + * @throws IOException if an I/O exception occurred parsing the request and preparing for + * execution + */ + protected abstract RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException; + + /** + * Parameters used for controlling the response and thus might not be consumed during + * preparation of the request execution in + * {@link BaseRestHandler#prepareRequest(RestRequest, NodeClient)}. + * + * @return a set of parameters used to control the response and thus should not trip strict + * URL parameter checks. + */ + protected Set<String> responseParams() { + return Collections.emptySet(); + } }
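Under this contract, a typical handler conversion consumes every parameter inside prepareRequest and returns the deferred call as a lambda. A hedged sketch of the shape most of the converted handlers below follow; MyAction, MyRequest, and the "verbose" parameter are invented names, not part of this change:

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    // consume all parameters up front so the strict parameter check passes
    final boolean verbose = request.paramAsBoolean("verbose", false);
    final MyRequest myRequest = new MyRequest(verbose);
    // defer execution until the framework supplies the channel
    return channel -> client.execute(MyAction.INSTANCE, myRequest, new RestToXContentListener<>(channel));
}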
diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index e63f35884e8..483d8986ab1 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -39,9 +39,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; -/** - * - */ public class RestController extends AbstractLifecycleComponent { private final PathTrie<RestHandler> getHandlers = new PathTrie<>(RestUtils.REST_DECODER); private final PathTrie<RestHandler> postHandlers = new PathTrie<>(RestUtils.REST_DECODER); @@ -220,10 +217,11 @@ public class RestController extends AbstractLifecycleComponent { */ boolean checkRequestParameters(final RestRequest request, final RestChannel channel) { // error_trace cannot be used when we disable detailed errors - if (channel.detailedErrorsEnabled() == false && request.paramAsBoolean("error_trace", false)) { + // we consume the error_trace parameter first to ensure that it is always consumed + if (request.paramAsBoolean("error_trace", false) && channel.detailedErrorsEnabled() == false) { try { XContentBuilder builder = channel.newErrorBuilder(); - builder.startObject().field("error","error traces in responses are disabled.").endObject().string(); + builder.startObject().field("error", "error traces in responses are disabled.").endObject().string(); RestResponse response = new BytesRestResponse(BAD_REQUEST, builder); response.addHeader("Content-Type", "application/json"); channel.sendResponse(response); diff --git a/core/src/main/java/org/elasticsearch/rest/RestRequest.java b/core/src/main/java/org/elasticsearch/rest/RestRequest.java index 2db917dacf0..3f0f32fff37 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/core/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -28,9 +28,12 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import java.net.SocketAddress; -import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; @@ -39,6 +42,7 @@ public abstract class RestRequest implements ToXContent.Params { private final Map<String, String> params; private final String rawPath; + private final Set<String> consumedParams = new HashSet<>(); public RestRequest(String uri) { final Map<String, String> params = new HashMap<>(); @@ -106,11 +110,13 @@ public abstract class RestRequest implements ToXContent.Params { @Override public final String param(String key) { + consumedParams.add(key); return params.get(key); } @Override public final String param(String key, String defaultValue) { + consumedParams.add(key); String value = params.get(key); if (value == null) { return defaultValue; @@ -122,6 +128,30 @@ public abstract class RestRequest implements ToXContent.Params { return params; } + /** + * Returns a list of parameters that have been consumed. This method returns a copy, callers + * are free to modify the returned list. + * + * @return the list of currently consumed parameters. + */ + List<String> consumedParams() { + return consumedParams.stream().collect(Collectors.toList()); + } + + /** + * Returns a list of parameters that have not yet been consumed. This method returns a copy, + * callers are free to modify the returned list. + * + * @return the list of currently unconsumed parameters. + */ + List<String> unconsumedParams() { + return params .keySet() .stream() .filter(p -> !consumedParams.contains(p)) .collect(Collectors.toList()); + } + public float paramAsFloat(String key, float defaultValue) { String sValue = param(key); if (sValue == null) {
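The consumed/unconsumed bookkeeping above is a small, self-contained pattern: every read marks a key as consumed, and whatever was never read can be reported afterwards. A plain-Java sketch of the same idea, independent of the RestRequest API:

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

class TrackedParams {
    private final Map<String, String> params = new HashMap<>();
    private final Set<String> consumed = new HashSet<>();

    TrackedParams(Map<String, String> initial) {
        params.putAll(initial);
    }

    String param(String key) {
        consumed.add(key); // reading a key counts as consuming it
        return params.get(key);
    }

    List<String> unconsumedParams() {
        // whatever was supplied but never read, e.g. a misspelled parameter
        return params.keySet().stream()
                .filter(p -> !consumed.contains(p))
                .collect(Collectors.toList());
    }
}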
diff --git a/core/src/main/java/org/elasticsearch/rest/RestResponse.java b/core/src/main/java/org/elasticsearch/rest/RestResponse.java index 7946785bc97..7e031f8d004 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/core/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -29,9 +29,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -/** - * - */ public abstract class RestResponse { protected Map<String, List<String>> customHeaders; diff --git a/core/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java b/core/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java index 203c2eafc77..e12329f93a3 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java +++ b/core/src/main/java/org/elasticsearch/rest/action/AcknowledgedRestListener.java @@ -28,8 +28,6 @@ import java.io.IOException; import static org.elasticsearch.rest.RestStatus.OK; -/** - */ public class AcknowledgedRestListener<T extends AcknowledgedResponse> extends RestBuilderListener<T> { public AcknowledgedRestListener(RestChannel channel) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActions.java b/core/src/main/java/org/elasticsearch/rest/action/RestActions.java index bb72e3e2249..d017dfaf874 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActions.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestActions.java @@ -51,9 +51,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.List; -/** - * - */ public class RestActions { public static long parseVersion(RestRequest request) { @@ -195,7 +192,6 @@ public class RestActions { queryBuilder.defaultField(request.param("df")); queryBuilder.analyzer(request.param("analyzer")); queryBuilder.analyzeWildcard(request.paramAsBoolean("analyze_wildcard", false)); - queryBuilder.lowercaseExpandedTerms(request.paramAsBoolean("lowercase_expanded_terms", true)); queryBuilder.lenient(request.paramAsBoolean("lenient", null)); String defaultOperator = request.param("default_operator"); if (defaultOperator != null) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java b/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java index cc93e72d80d..c460331afaa 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java @@ -34,11 +34,22 @@ public abstract class RestBuilderListener<Response> extends RestResponseListener @Override public final RestResponse buildResponse(Response response) throws Exception { - return buildResponse(response, channel.newBuilder()); + try (XContentBuilder builder = channel.newBuilder()) { + final RestResponse restResponse = buildResponse(response, builder); + assert assertBuilderClosed(builder); + return restResponse; + } } /** - * Builds a response to send back over the channel. + * Builds a response to send back over the channel. Implementors should ensure that they close the provided {@link XContentBuilder} + * using the {@link XContentBuilder#close()} method. */ public abstract RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception; + + // pkg private method that we can override for testing + boolean assertBuilderClosed(XContentBuilder xContentBuilder) { + assert xContentBuilder.generator().isClosed() : "callers should ensure the XContentBuilder is closed themselves"; + return true; + } }
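assertBuilderClosed above uses the common "assertion method returns true" idiom: because the method is only ever invoked inside an assert statement, the check disappears entirely when the JVM runs without -ea. A generic sketch of the idiom, not code from this change:

final class Invariants {
    // always returns true so the call can sit inside an assert statement
    static boolean nonNull(Object value) {
        assert value != null : "callers should pass a non-null value";
        return true;
    }

    static void demo(Object value) {
        // evaluated only when the JVM runs with -ea; free otherwise
        assert nonNull(value);
    }
}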
diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestFieldStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/RestFieldStatsAction.java index e6ef620db10..080fbbfb7aa 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestFieldStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestFieldStatsAction.java @@ -30,20 +30,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; -/** - */ public class RestFieldStatsAction extends BaseRestHandler { @Inject @@ -56,8 +54,8 @@ public class RestFieldStatsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, - final RestChannel channel, final NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(final RestRequest request, + final NodeClient client) throws IOException { if (RestActions.hasBodyContent(request) && request.hasParam("fields")) { throw new IllegalArgumentException("can't specify a request body and [fields] request parameter, " + "either specify a request body or the [fields] request parameter"); @@ -73,7 +71,7 @@ public class RestFieldStatsAction extends BaseRestHandler { fieldStatsRequest.setFields(Strings.splitStringByCommaToArray(request.param("fields"))); } - client.fieldStats(fieldStatsRequest, new RestBuilderListener<FieldStatsResponse>(channel) { + return channel -> client.fieldStats(fieldStatsRequest, new RestBuilderListener<FieldStatsResponse>(channel) { @Override public RestResponse buildResponse(FieldStatsResponse response, XContentBuilder builder) throws Exception { builder.startObject(); @@ -81,7 +79,7 @@ public class RestFieldStatsAction extends BaseRestHandler { builder.startObject("indices"); for (Map.Entry<String, Map<String, FieldStats>> entry1 : - response.getIndicesMergedFieldStats().entrySet()) { + response.getIndicesMergedFieldStats().entrySet()) { builder.startObject(entry1.getKey()); builder.startObject("fields"); for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestMainAction.java b/core/src/main/java/org/elasticsearch/rest/action/RestMainAction.java index 56053f414b0..210ccb2e227 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestMainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestMainAction.java @@ -23,12 +23,12 @@ import org.elasticsearch.action.main.MainAction; import org.elasticsearch.action.main.MainRequest; import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -39,9 +39,6 @@ import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -/** - * - */ public class RestMainAction { @Inject @@ -52,8 +49,8 @@ public class RestMainAction { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { - client.execute(MainAction.INSTANCE, new MainRequest(), new RestBuilderListener<MainResponse>(channel) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> client.execute(MainAction.INSTANCE, new MainRequest(), new RestBuilderListener<MainResponse>(channel) { @Override public RestResponse buildResponse(MainResponse mainResponse, XContentBuilder builder) throws Exception { return convertMainResponse(mainResponse, request, builder); @@ -63,9 +60,6 @@ public class RestMainAction { } static BytesRestResponse convertMainResponse(MainResponse response, RestRequest request, XContentBuilder builder) throws IOException { RestStatus status = response.isAvailable() ?
RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE; - if (request.method() == RestRequest.Method.HEAD) { - return new BytesRestResponse(status, builder); - } // Default to pretty printing, but allow ?pretty=false to disable if (request.hasParam("pretty") == false) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java index 3c558fba937..b6a4e743d13 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java @@ -26,11 +26,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.TaskId; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.admin.cluster.RestListTasksAction.listTasksResponseListener; @@ -43,26 +44,29 @@ public class RestCancelTasksAction extends BaseRestHandler { super(settings); this.clusterService = clusterService; controller.registerHandler(POST, "/_tasks/_cancel", this); - controller.registerHandler(POST, "/_tasks/{taskId}/_cancel", this); + controller.registerHandler(POST, "/_tasks/{task_id}/_cancel", this); } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { - String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - TaskId taskId = new TaskId(request.param("taskId")); - String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); - TaskId parentTaskId = new TaskId(request.param("parent_task_id")); + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodes")); + final TaskId taskId = new TaskId(request.param("task_id")); + final String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); + final TaskId parentTaskId = new TaskId(request.param("parent_task_id")); + final String groupBy = request.param("group_by", "nodes"); CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); cancelTasksRequest.setTaskId(taskId); - cancelTasksRequest.setNodesIds(nodesIds); + cancelTasksRequest.setNodes(nodesIds); cancelTasksRequest.setActions(actions); cancelTasksRequest.setParentTaskId(parentTaskId); - client.admin().cluster().cancelTasks(cancelTasksRequest, listTasksResponseListener(clusterService, channel)); + return channel -> + client.admin().cluster().cancelTasks(cancelTasksRequest, listTasksResponseListener(clusterService, groupBy, channel)); } @Override public boolean canTripCircuitBreaker() { return false; } + } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index bae1d1b6714..3add5cb0390 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ 
b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -19,8 +19,6 @@ package org.elasticsearch.rest.action.admin.cluster; -import java.io.IOException; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; @@ -35,7 +33,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -43,6 +40,8 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + /** * Class handling cluster allocation explanation at the REST level */ @@ -56,7 +55,7 @@ public class RestClusterAllocationExplainAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClusterAllocationExplainRequest req; if (RestActions.hasBodyContent(request) == false) { // Empty request signals "explain the first unassigned shard you find" @@ -67,16 +66,16 @@ public class RestClusterAllocationExplainAction extends BaseRestHandler { req = ClusterAllocationExplainRequest.parse(parser); } catch (IOException e) { logger.debug("failed to parse allocation explain request", e); - channel.sendResponse( - new BytesRestResponse(ExceptionsHelper.status(e), BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); - return; + return channel -> channel.sendResponse( + new BytesRestResponse(ExceptionsHelper.status(e), BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); } } try { req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); - client.admin().cluster().allocationExplain(req, new RestBuilderListener<ClusterAllocationExplainResponse>(channel) { + return channel -> + client.admin().cluster().allocationExplain(req, new RestBuilderListener<ClusterAllocationExplainResponse>(channel) { @Override public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws Exception { response.getExplanation().toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -85,7 +84,9 @@ public class RestClusterAllocationExplainAction extends BaseRestHandler { }); } catch (Exception e) { logger.error("failed to explain allocation", e); - channel.sendResponse(new BytesRestResponse(ExceptionsHelper.status(e), BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); + return channel -> + channel.sendResponse( + new BytesRestResponse(ExceptionsHelper.status(e), BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index ca2cbaf79fa..e55603a19bd 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++
b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -40,6 +39,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; import java.io.IOException; +import java.util.Set; public class RestClusterGetSettingsAction extends BaseRestHandler { @@ -56,13 +56,13 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() .routingTable(false) .nodes(false); final boolean renderDefaults = request.paramAsBoolean("include_defaults", false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) { + return channel -> client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { return new BytesRestResponse(RestStatus.OK, renderResponse(response.getState(), renderDefaults, builder, request)); @@ -70,6 +70,11 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { }); } + @Override + protected Set<String> responseParams() { + return Settings.FORMAT_PARAMS; + } + @Override public boolean canTripCircuitBreaker() { return false; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java index 5f64bcf8aa3..ad51c9f1e64 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -29,12 +28,14 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; +import java.io.IOException; +import java.util.Collections; import java.util.Locale; +import java.util.Set; import static org.elasticsearch.client.Requests.clusterHealthRequest; @@ -49,7 +50,7 @@ public class RestClusterHealthAction extends BaseRestHandler { } @Override - public void handleRequest(final
RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index"))); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); @@ -73,11 +74,19 @@ public class RestClusterHealthAction extends BaseRestHandler { if (request.param("wait_for_events") != null) { clusterHealthRequest.waitForEvents(Priority.valueOf(request.param("wait_for_events").toUpperCase(Locale.ROOT))); } - client.admin().cluster().health(clusterHealthRequest, new RestStatusToXContentListener<ClusterHealthResponse>(channel)); + return channel -> client.admin().cluster().health(clusterHealthRequest, new RestStatusToXContentListener<>(channel)); + } + + private static final Set<String> RESPONSE_PARAMS = Collections.singleton("level"); + + @Override + protected Set<String> responseParams() { + return RESPONSE_PARAMS; } @Override public boolean canTripCircuitBreaker() { return false; } + }
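The singleton above is the motivating case for responseParams(): "level" is only read while the health response is being rendered, never inside prepareRequest, so without declaring it here the strict parameter check added in BaseRestHandler would reject a perfectly valid request. Conversely, a hypothetical typo such as GET /_cluster/health?levle=indices would now fail fast with a message along the lines of: request [/_cluster/health] contains unrecognized parameter: [levle] -> did you mean [level]?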
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 1a1e78b1720..d5815b3b31c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -21,8 +21,8 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; @@ -40,16 +40,16 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; import java.io.IOException; +import java.util.Collections; import java.util.EnumSet; +import java.util.HashSet; +import java.util.Set; -/** - */ public class RestClusterRerouteAction extends BaseRestHandler { private static final ObjectParser<ClusterRerouteRequest, ParseFieldMatcher> PARSER = new ObjectParser<>("cluster_reroute"); static { @@ -74,29 +74,47 @@ public class RestClusterRerouteAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClusterRerouteRequest clusterRerouteRequest = createRequest(request, registry, parseFieldMatcher); - client.admin().cluster().reroute(clusterRerouteRequest, new AcknowledgedRestListener<ClusterRerouteResponse>(channel) { - @Override - protected void addCustomFields(XContentBuilder builder, ClusterRerouteResponse response) throws IOException { - builder.startObject("state"); - // by default, return everything but metadata - if (request.param("metric") == null) { - request.params().put("metric", DEFAULT_METRICS); + + // by default, return everything but metadata + final String metric = request.param("metric"); + if (metric == null) { + request.params().put("metric", DEFAULT_METRICS); + } + + return channel -> + client.admin().cluster().reroute(clusterRerouteRequest, new AcknowledgedRestListener<ClusterRerouteResponse>(channel) { + @Override + protected void addCustomFields(XContentBuilder builder, ClusterRerouteResponse response) throws IOException { + builder.startObject("state"); + settingsFilter.addFilterSettingParams(request); + response.getState().toXContent(builder, request); + builder.endObject(); + if (clusterRerouteRequest.explain()) { + assert response.getExplanations() != null; + response.getExplanations().toXContent(builder, ToXContent.EMPTY_PARAMS); + } } - settingsFilter.addFilterSettingParams(request); - response.getState().toXContent(builder, request); - builder.endObject(); - if (clusterRerouteRequest.explain()) { - assert response.getExplanations() != null; - response.getExplanations().toXContent(builder, ToXContent.EMPTY_PARAMS); - } - } }); } + private static final Set<String> RESPONSE_PARAMS; + + static { + final Set<String> responseParams = new HashSet<>(); + responseParams.add("metric"); + responseParams.addAll(Settings.FORMAT_PARAMS); + RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); + } + + @Override + protected Set<String> responseParams() { + return RESPONSE_PARAMS; + } + public static ClusterRerouteRequest createRequest(RestRequest request, AllocationCommandRegistry registry, - ParseFieldMatcher parseFieldMatcher) throws IOException { + ParseFieldMatcher parseFieldMatcher) throws IOException { ClusterRerouteRequest clusterRerouteRequest = Requests.clusterRerouteRequest(); clusterRerouteRequest.dryRun(request.paramAsBoolean("dry_run", clusterRerouteRequest.dryRun())); clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain())); @@ -125,4 +143,5 @@ public class RestClusterRerouteAction { return parseFieldMatcher; } } + } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java index 754b9b0d633..459ccf5fa4d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java @@ -20,24 +20,22 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import
java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -/** - */ public class RestClusterSearchShardsAction extends BaseRestHandler { @Inject @@ -52,7 +50,7 @@ public class RestClusterSearchShardsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterSearchShardsRequest clusterSearchShardsRequest = Requests.clusterSearchShardsRequest(indices); clusterSearchShardsRequest.local(request.paramAsBoolean("local", clusterSearchShardsRequest.local())); @@ -62,6 +60,6 @@ public class RestClusterSearchShardsAction extends BaseRestHandler { clusterSearchShardsRequest.preference(request.param("preference")); clusterSearchShardsRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions())); - client.admin().cluster().searchShards(clusterSearchShardsRequest, new RestToXContentListener<ClusterSearchShardsResponse>(channel)); + return channel -> client.admin().cluster().searchShards(clusterSearchShardsRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java index fab2ee0062f..54a55a0ba06 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -22,8 +22,8 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.Requests; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -32,14 +32,17 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; +import java.util.Collections; import java.util.EnumSet; +import java.util.HashSet; +import java.util.Set; public class RestClusterStateAction extends BaseRestHandler { @@ -56,7 +59,7 @@ public class RestClusterStateAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request,
clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); @@ -84,7 +87,7 @@ public class RestClusterStateAction extends BaseRestHandler { } settingsFilter.addFilterSettingParams(request); - client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) { + return channel -> client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { builder.startObject(); @@ -96,6 +99,20 @@ public class RestClusterStateAction extends BaseRestHandler { }); } + private static final Set<String> RESPONSE_PARAMS; + + static { + final Set<String> responseParams = new HashSet<>(); + responseParams.add("metric"); + responseParams.addAll(Settings.FORMAT_PARAMS); + RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); + } + + @Override + protected Set<String> responseParams() { + return RESPONSE_PARAMS; + } + @Override public boolean canTripCircuitBreaker() { return false; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java index 7ef05d04553..d1c7346eb0e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java @@ -24,14 +24,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; -/** - * - */ +import java.io.IOException; + public class RestClusterStatsAction extends BaseRestHandler { @Inject @@ -42,10 +40,10 @@ public class RestClusterStatsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); clusterStatsRequest.timeout(request.param("timeout")); - client.admin().cluster().clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel)); + return channel -> client.admin().cluster().clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel)); } @Override diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index 8de725dbe79..b041ae4c3ab 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -21,21 +21,21 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.client.node.NodeClient; import
org.elasticsearch.client.Requests; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; import java.io.IOException; import java.util.Map; +import java.util.Set; public class RestClusterUpdateSettingsAction extends BaseRestHandler { @@ -46,7 +46,7 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest(); clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout())); clusterUpdateSettingsRequest.masterNodeTimeout( @@ -62,7 +62,7 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { clusterUpdateSettingsRequest.persistentSettings((Map) source.get("persistent")); } - client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, + return channel -> client.admin().cluster().updateSettings(clusterUpdateSettingsRequest, new AcknowledgedRestListener<ClusterUpdateSettingsResponse>(channel) { @Override protected void addCustomFields(XContentBuilder builder, ClusterUpdateSettingsResponse response) throws IOException { @@ -77,6 +77,11 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { }); } + @Override + protected Set<String> responseParams() { + return Settings.FORMAT_PARAMS; + } + @Override public boolean canTripCircuitBreaker() { return false; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java index 96449131a61..84ca88d292b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java @@ -20,16 +20,16 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.createSnapshotRequest; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -47,11 +47,11 @@ public class RestCreateSnapshotAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final
RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CreateSnapshotRequest createSnapshotRequest = createSnapshotRequest(request.param("repository"), request.param("snapshot")); createSnapshotRequest.source(request.content().utf8ToString()); createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); - client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<CreateSnapshotResponse>(channel)); + return channel -> client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 78d063bae00..780368dfd34 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -20,16 +20,16 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.deleteRepositoryRequest; import static org.elasticsearch.rest.RestRequest.Method.DELETE; @@ -45,11 +45,11 @@ public class RestDeleteRepositoryAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository")); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); - client.admin().cluster().deleteRepository(deleteRepositoryRequest, new AcknowledgedRestListener<DeleteRepositoryResponse>(channel)); + return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index d001a1e90e5..29ad89acda6 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -20,16 +20,16 @@ package
org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.deleteSnapshotRequest; import static org.elasticsearch.rest.RestRequest.Method.DELETE; @@ -45,9 +45,9 @@ public class RestDeleteSnapshotAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteSnapshotRequest deleteSnapshotRequest = deleteSnapshotRequest(request.param("repository"), request.param("snapshot")); deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout())); - client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new AcknowledgedRestListener<DeleteSnapshotResponse>(channel)); + return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java index 212b42135e9..31a0c70b67b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java @@ -23,11 +23,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.DELETE; public class RestDeleteStoredScriptAction extends BaseRestHandler { @@ -49,9 +50,9 @@ public class RestDeleteStoredScriptAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { DeleteStoredScriptRequest deleteStoredScriptRequest = new DeleteStoredScriptRequest(getScriptLang(request), request.param("id")); - client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index 802af3cb5b8..6e5d06f3740 100644 ---
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -31,12 +31,14 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; +import java.util.Set; + import static org.elasticsearch.client.Requests.getRepositoryRequest; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; @@ -57,23 +59,30 @@ public class RestGetRepositoriesAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); GetRepositoriesRequest getRepositoriesRequest = getRepositoryRequest(repositories); getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); settingsFilter.addFilterSettingParams(request); - client.admin().cluster().getRepositories(getRepositoriesRequest, new RestBuilderListener<GetRepositoriesResponse>(channel) { - @Override - public RestResponse buildResponse(GetRepositoriesResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - for (RepositoryMetaData repositoryMetaData : response.repositories()) { - RepositoriesMetaData.toXContent(repositoryMetaData, builder, request); - } - builder.endObject(); + return channel -> + client.admin().cluster().getRepositories(getRepositoriesRequest, new RestBuilderListener<GetRepositoriesResponse>(channel) { + @Override + public RestResponse buildResponse(GetRepositoriesResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (RepositoryMetaData repositoryMetaData : response.repositories()) { + RepositoriesMetaData.toXContent(repositoryMetaData, builder, request); + } + builder.endObject(); - return new BytesRestResponse(OK, builder); - } + return new BytesRestResponse(OK, builder); + } }); } + + @Override + protected Set<String> responseParams() { + return Settings.FORMAT_PARAMS; + } + }
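Alongside the migration, RestGetRepositoriesAction (like RestClusterUpdateSettingsAction above and RestNodesInfoAction below) gains a responseParams() override. Presumably this is needed because parameters are now consumed up front in prepareRequest: a parameter that is only read while the response is rendered must be declared explicitly or it would look unrecognized. A sketch of the override, assuming Set<String> as the element type and that Settings.FORMAT_PARAMS holds the settings-rendering parameters:

-------------------------------------------------------------------------------
// Declares parameters consumed while building the response rather than while
// preparing the request; FORMAT_PARAMS is assumed to cover settings-rendering
// options such as "flat_settings".
@Override
protected Set<String> responseParams() {
    return Settings.FORMAT_PARAMS;
}
-------------------------------------------------------------------------------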
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 9e10a87bc03..515ce2d68d0 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -20,17 +20,17 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.getSnapshotsRequest; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -47,7 +47,7 @@ public class RestGetSnapshotsAction extends BaseRestHandler { @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String repository = request.param("repository"); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); @@ -55,6 +55,6 @@ getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); - client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestToXContentListener<GetSnapshotsResponse>(channel)); + return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java index 1185685c49a..a2d11e75a67 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java @@ -26,13 +26,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestGetStoredScriptAction extends BaseRestHandler { @@ -58,9 +59,9 @@ public class RestGetStoredScriptAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { final GetStoredScriptRequest getRequest = new GetStoredScriptRequest(getScriptLang(request), request.param("id")); - client.admin().cluster().getStoredScript(getRequest, new RestBuilderListener<GetStoredScriptResponse>(channel) { + return channel -> client.admin().cluster().getStoredScript(getRequest, new RestBuilderListener<GetStoredScriptResponse>(channel) { @Override public RestResponse buildResponse(GetStoredScriptResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java index f1edf672010..df0945d99b8 100644 ---
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java @@ -25,12 +25,13 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.tasks.TaskId; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestGetTaskAction extends BaseRestHandler { @@ -41,7 +42,7 @@ public class RestGetTaskAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { TaskId taskId = new TaskId(request.param("taskId")); boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false); TimeValue timeout = request.paramAsTime("timeout", null); @@ -50,6 +51,6 @@ public class RestGetTaskAction extends BaseRestHandler { getTaskRequest.setTaskId(taskId); getTaskRequest.setWaitForCompletion(waitForCompletion); getTaskRequest.setTimeout(timeout); - client.admin().cluster().getTask(getTaskRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().cluster().getTask(getTaskRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java index d5ff427e3d0..66a3c9cf6d6 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java @@ -40,10 +40,13 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.tasks.TaskId; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestListTasksAction extends BaseRestHandler { + private final ClusterService clusterService; @Inject @@ -53,16 +56,23 @@ public class RestListTasksAction extends BaseRestHandler { controller.registerHandler(GET, "/_tasks", this); } + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final ListTasksRequest listTasksRequest = generateListTasksRequest(request); + final String groupBy = request.param("group_by", "nodes"); + return channel -> client.admin().cluster().listTasks(listTasksRequest, listTasksResponseListener(clusterService, groupBy, channel)); + } + public static ListTasksRequest generateListTasksRequest(RestRequest request) { boolean detailed = request.paramAsBoolean("detailed", false); - String[] nodesIds = Strings.splitStringByCommaToArray(request.param("node_id")); + String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes")); String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); TaskId parentTaskId = new TaskId(request.param("parent_task_id")); boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", 
false); TimeValue timeout = request.paramAsTime("timeout", null); ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.setNodesIds(nodesIds); + listTasksRequest.setNodes(nodes); listTasksRequest.setDetailed(detailed); listTasksRequest.setActions(actions); listTasksRequest.setParentTaskId(parentTaskId); @@ -71,17 +81,13 @@ public class RestListTasksAction extends BaseRestHandler { return listTasksRequest; } - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { - client.admin().cluster().listTasks(generateListTasksRequest(request), listTasksResponseListener(clusterService, channel)); - } - /** * Standard listener for extensions of {@link ListTasksResponse} that supports {@code group_by=nodes}. */ - public static <T extends ListTasksResponse> ActionListener<T> listTasksResponseListener(ClusterService clusterService, - RestChannel channel) { - String groupBy = channel.request().param("group_by", "nodes"); + public static <T extends ListTasksResponse> ActionListener<T> listTasksResponseListener( + ClusterService clusterService, + String groupBy, + final RestChannel channel) { if ("nodes".equals(groupBy)) { return new RestBuilderListener<T>(channel) { @Override
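The list-tasks change is the one non-mechanical migration in this group: the listener used to pull group_by out of channel.request(), which no longer exists at preparation time, so the parameter is read eagerly and passed in; the node_id parameter is also renamed to nodes, with setNodesIds becoming setNodes. The resulting flow, taken directly from the hunks above:

-------------------------------------------------------------------------------
// Parameters are consumed during preparation; the returned lambda only
// captures already-extracted values instead of re-reading the request.
final ListTasksRequest listTasksRequest = generateListTasksRequest(request);
final String groupBy = request.param("group_by", "nodes");
return channel -> client.admin().cluster().listTasks(listTasksRequest,
        listTasksResponseListener(clusterService, groupBy, channel));
-------------------------------------------------------------------------------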
").append(node.getNode().toString()).append("\n"); - Strings.spaceify(3, node.getHotThreads(), sb); - sb.append('\n'); - } - return new BytesRestResponse(RestStatus.OK, sb.toString()); - } - }); + return channel -> client.admin().cluster().nodesHotThreads( + nodesHotThreadsRequest, + new RestResponseListener(channel) { + @Override + public RestResponse buildResponse(NodesHotThreadsResponse response) throws Exception { + StringBuilder sb = new StringBuilder(); + for (NodeHotThreads node : response.getNodes()) { + sb.append("::: ").append(node.getNode().toString()).append("\n"); + Strings.spaceify(3, node.getHotThreads(), sb); + sb.append('\n'); + } + return new BytesRestResponse(RestStatus.OK, sb.toString()); + } + }); } @Override diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java index c45709d07d5..40cfc6372a3 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java @@ -27,11 +27,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import java.io.IOException; import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -65,7 +65,7 @@ public class RestNodesInfoAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] nodeIds; Set metrics; @@ -108,7 +108,12 @@ public class RestNodesInfoAction extends BaseRestHandler { settingsFilter.addFilterSettingParams(request); - client.admin().cluster().nodesInfo(nodesInfoRequest, new NodesResponseRestListener<>(channel)); + return channel -> client.admin().cluster().nodesInfo(nodesInfoRequest, new NodesResponseRestListener<>(channel)); + } + + @Override + protected Set responseParams() { + return Settings.FORMAT_PARAMS; } @Override diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index be6847f1b52..917f5b2c5b1 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -27,11 +27,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import java.io.IOException; +import java.util.Collections; import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -53,7 +54,7 @@ public class RestNodesStatsAction extends BaseRestHandler { } @Override 
- public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all")); @@ -111,11 +112,19 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.indices().includeSegmentFileSizes(true); } - client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel)); + return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel)); + } + + private final Set<String> RESPONSE_PARAMS = Collections.singleton("level"); + + @Override + protected Set<String> responseParams() { + return RESPONSE_PARAMS; } @Override public boolean canTripCircuitBreaker() { return false; } + }
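RestNodesStatsAction declares a handler-specific response parameter: level only affects how the stats response is grouped when it is rendered, so it is never touched in prepareRequest and is declared instead. The shape, following the hunk above with Set<String> assumed as the element type:

-------------------------------------------------------------------------------
// "level" is read while rendering the response, not while preparing the
// request, so it is declared rather than consumed.
private final Set<String> RESPONSE_PARAMS = Collections.singleton("level");

@Override
protected Set<String> responseParams() {
    return RESPONSE_PARAMS;
}
-------------------------------------------------------------------------------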
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index d1cb65092ce..e3441d00d06 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -24,11 +24,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + public class RestPendingClusterTasksAction extends BaseRestHandler { @Inject @@ -38,10 +39,10 @@ public class RestPendingClusterTasksAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); - client.admin().cluster().pendingClusterTasks(pendingClusterTasksRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().cluster().pendingClusterTasks(pendingClusterTasksRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java index 002e1bfdc95..c142230925d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -20,16 +20,16 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.putRepositoryRequest; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -48,12 +48,12 @@ public class RestPutRepositoryAction extends BaseRestHandler { @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutRepositoryRequest putRepositoryRequest = putRepositoryRequest(request.param("repository")); putRepositoryRequest.source(request.content().utf8ToString()); putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); putRepositoryRequest.timeout(request.paramAsTime("timeout", putRepositoryRequest.timeout())); - client.admin().cluster().putRepository(putRepositoryRequest, new AcknowledgedRestListener<PutRepositoryResponse>(channel)); + return channel -> client.admin().cluster().putRepository(putRepositoryRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index c5156c4cd09..aec998e6f9a 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -23,11 +23,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -51,9 +52,9 @@ public class RestPutStoredScriptAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { PutStoredScriptRequest putRequest = new PutStoredScriptRequest(getScriptLang(request), request.param("id")); putRequest.script(request.content()); - client.admin().cluster().putStoredScript(putRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().cluster().putStoredScript(putRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java index 100866e02db..f94990e1ae7 100644 ---
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java @@ -20,16 +20,16 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.restoreSnapshotRequest; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -45,11 +45,11 @@ public class RestRestoreSnapshotAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RestoreSnapshotRequest restoreSnapshotRequest = restoreSnapshotRequest(request.param("repository"), request.param("snapshot")); restoreSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", restoreSnapshotRequest.masterNodeTimeout())); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); restoreSnapshotRequest.source(request.content().utf8ToString()); - client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<RestoreSnapshotResponse>(channel)); + return channel -> client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index 4333dfc0271..b517e32a3da 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -25,11 +25,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.snapshotsStatusRequest; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -47,7 +48,7 @@ public class RestSnapshotsStatusAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String repository = request.param("repository", "_all"); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) { @@ -57,6 +58,6 @@ public class
RestSnapshotsStatusAction extends BaseRestHandler { snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout())); - client.admin().cluster().snapshotsStatus(snapshotsStatusRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().cluster().snapshotsStatus(snapshotsStatusRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java index 85aac840777..84427524466 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java @@ -24,11 +24,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.verifyRepositoryRequest; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -41,10 +42,10 @@ public class RestVerifyRepositoryAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { VerifyRepositoryRequest verifyRepositoryRequest = verifyRepositoryRequest(request.param("repository")); verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); verifyRepositoryRequest.timeout(request.paramAsTime("timeout", verifyRepositoryRequest.timeout())); - client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java index 0b60a5c249c..bb55a0e1a07 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; import java.util.Arrays; -/** - * - */ public class AliasesNotFoundException extends ResourceNotFoundException { public AliasesNotFoundException(String... 
names) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAliasesExistAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAliasesExistAction.java index dbb8ddde9d1..59aafde5930 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAliasesExistAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAliasesExistAction.java @@ -31,16 +31,15 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; -/** - */ public class RestAliasesExistAction extends BaseRestHandler { @Inject @@ -52,7 +51,7 @@ public class RestAliasesExistAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] aliases = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases); @@ -60,7 +59,7 @@ public class RestAliasesExistAction extends BaseRestHandler { getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local())); - client.admin().indices().aliasesExist(getAliasesRequest, new ActionListener<AliasesExistResponse>() { + return channel -> client.admin().indices().aliasesExist(getAliasesRequest, new ActionListener<AliasesExistResponse>() { @Override public void onResponse(AliasesExistResponse response) { @@ -85,6 +84,7 @@ public class RestAliasesExistAction extends BaseRestHandler { logger.error("Failed to send failure response", inner); } } + }); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java index 04d0bf57612..1390e9d771d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java @@ -22,15 +22,12 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions; @@ -66,43 +63,16 @@ public
class RestAnalyzeAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { - - String[] texts = request.paramAsStringArrayOrEmptyIfAll("text"); + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); - analyzeRequest.text(texts); - analyzeRequest.analyzer(request.param("analyzer")); - analyzeRequest.field(request.param("field")); - if (request.hasParam("tokenizer")) { - analyzeRequest.tokenizer(request.param("tokenizer")); - } - for (String filter : request.paramAsStringArray("filter", Strings.EMPTY_ARRAY)) { - analyzeRequest.addTokenFilter(filter); - } - for (String charFilter : request.paramAsStringArray("char_filter", Strings.EMPTY_ARRAY)) { - analyzeRequest.addTokenFilter(charFilter); - } - analyzeRequest.explain(request.paramAsBoolean("explain", false)); - analyzeRequest.attributes(request.paramAsStringArray("attributes", analyzeRequest.attributes())); - if (RestActions.hasBodyContent(request)) { - XContentType type = RestActions.guessBodyContentType(request); - if (type == null) { - if (texts == null || texts.length == 0) { - texts = new String[]{ RestActions.getRestContent(request).utf8ToString() }; - analyzeRequest.text(texts); - } - } else { - // NOTE: if rest request with xcontent body has request parameters, the parameters does not override xcontent values - buildFromContent(RestActions.getRestContent(request), analyzeRequest, parseFieldMatcher); - } - } + buildFromContent(RestActions.getRestContent(request), analyzeRequest, parseFieldMatcher); - client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<>(channel)); } - public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) { + static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("Malformed content, must start with an object");
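RestAnalyzeAction loses its URL-parameter handling outright: text, analyzer, field, tokenizer, filter, char_filter, explain and attributes are no longer read from the query string, and the request is built solely from the body by the now package-private buildFromContent (note that the deleted branch had also been adding char_filter values via addTokenFilter). A sketch of how a test in the same package might drive the new entry point, assuming the XContentBuilder APIs used elsewhere in this patch; the index name "twitter" is illustrative only:

-------------------------------------------------------------------------------
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.json.JsonXContent;

// Build a body equivalent to {"analyzer": "standard", "text": "quick brown fox"}
// and feed it through the package-private parser.
AnalyzeRequest analyzeRequest = new AnalyzeRequest("twitter");
BytesReference body = JsonXContent.contentBuilder()
        .startObject()
        .field("analyzer", "standard")
        .field("text", "quick brown fox")
        .endObject()
        .bytes();
RestAnalyzeAction.buildFromContent(body, analyzeRequest, ParseFieldMatcher.STRICT);
-------------------------------------------------------------------------------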
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index 391eaa64d50..942da1f092e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -31,12 +31,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -57,12 +57,13 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest( Strings.splitStringByCommaToArray(request.param("index"))); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest, parseFieldMatcher); - client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener<ClearIndicesCacheResponse>(channel) { + return channel -> + client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener<ClearIndicesCacheResponse>(channel) { @Override public RestResponse buildResponse(ClearIndicesCacheResponse response, XContentBuilder builder) throws Exception { builder.startObject(); @@ -85,6 +86,9 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { if (parseFieldMatcher.match(entry.getKey(), Fields.QUERY)) { clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache())); } + if (parseFieldMatcher.match(entry.getKey(), Fields.REQUEST_CACHE)) { + clearIndicesCacheRequest.requestCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.requestCache())); + } if (parseFieldMatcher.match(entry.getKey(), Fields.FIELD_DATA)) { clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.fieldDataCache())); } @@ -101,6 +105,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { public static class Fields { public static final ParseField QUERY = new ParseField("query", "filter", "filter_cache"); + public static final ParseField REQUEST_CACHE = new ParseField("request_cache"); public static final ParseField FIELD_DATA = new ParseField("field_data", "fielddata"); public static final ParseField RECYCLER = new ParseField("recycler"); public static final ParseField FIELDS = new ParseField("fields");
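Beyond the handler migration, the clear-cache endpoint picks up a REQUEST_CACHE parse field, so request_cache can be toggled just like the query and fielddata caches. The ParseField convention here: the first name is the current one, and any further names ("filter" and "filter_cache" for QUERY) are deprecated synonyms. An illustrative check, assuming the lenient matcher still accepts deprecated names:

-------------------------------------------------------------------------------
ParseField requestCache = new ParseField("request_cache");
ParseField query = new ParseField("query", "filter", "filter_cache");

// the current name always matches
boolean current = ParseFieldMatcher.EMPTY.match("request_cache", requestCache); // true
// deprecated synonyms are assumed to match only under the lenient matcher
boolean deprecated = ParseFieldMatcher.EMPTY.match("filter_cache", query);      // true
-------------------------------------------------------------------------------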
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java index e5baa27f4ec..68929fda4e1 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java @@ -20,21 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; -/** - * - */ +import java.io.IOException; + public class RestCloseIndexAction extends BaseRestHandler { @Inject @@ -45,11 +42,12 @@ public class RestCloseIndexAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout())); closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); - client.admin().indices().close(closeIndexRequest, new AcknowledgedRestListener<CloseIndexResponse>(channel)); + return channel -> client.admin().indices().close(closeIndexRequest, new AcknowledgedRestListener<>(channel)); } + } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 2a7f2a629a7..7aabdfd5762 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -21,22 +21,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; import java.io.IOException; -/** - * - */ public class RestCreateIndexAction extends BaseRestHandler { @Inject @@ -47,7 +43,7 @@ public class RestCreateIndexAction extends BaseRestHandler { @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index")); if (request.hasContent()) { createIndexRequest.source(request.content()); @@ -56,7 +52,7 @@ public class RestCreateIndexAction extends BaseRestHandler { createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout())); createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - client.admin().indices().create(createIndexRequest, new AcknowledgedRestListener<CreateIndexResponse>(channel) { + return channel -> client.admin().indices().create(createIndexRequest, new AcknowledgedRestListener<CreateIndexResponse>(channel) { @Override public void addCustomFields(XContentBuilder builder, CreateIndexResponse response) throws IOException { response.addCustomFields(builder);
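One detail worth noting in RestCreateIndexAction: the listener keeps its explicit AcknowledgedRestListener<CreateIndexResponse> type argument rather than the diamond used elsewhere in this patch, because it is an anonymous subclass overriding addCustomFields, and Java 8 does not permit the diamond operator on anonymous classes. The pattern, as in the hunk above:

-------------------------------------------------------------------------------
// Anonymous subclass: the type argument must be spelled out under Java 8.
return channel -> client.admin().indices().create(createIndexRequest,
        new AcknowledgedRestListener<CreateIndexResponse>(channel) {
            @Override
            public void addCustomFields(XContentBuilder builder, CreateIndexResponse response) throws IOException {
                response.addCustomFields(builder); // append the response's extra fields to the acknowledgment body
            }
        });
-------------------------------------------------------------------------------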
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java index d3e07effc14..94ffc7e989b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java @@ -20,21 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; -/** - * - */ +import java.io.IOException; + public class RestDeleteIndexAction extends BaseRestHandler { @Inject @@ -45,11 +42,11 @@ public class RestDeleteIndexAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout())); deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); - client.admin().indices().delete(deleteIndexRequest, new AcknowledgedRestListener<DeleteIndexResponse>(channel)); + return channel -> client.admin().indices().delete(deleteIndexRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java index 425581fe927..3d721d27482 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java @@ -23,11 +23,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + public class RestDeleteIndexTemplateAction extends BaseRestHandler { @Inject @@ -37,9 +38,9 @@ public class RestDeleteIndexTemplateAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); -
client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java index b963a805934..efab741a227 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java @@ -29,20 +29,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; -/** - * - */ public class RestFlushAction extends BaseRestHandler { @Inject @@ -56,12 +54,12 @@ public class RestFlushAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing())); - client.admin().indices().flush(flushRequest, new RestBuilderListener<FlushResponse>(channel) { + return channel -> client.admin().indices().flush(flushRequest, new RestBuilderListener<FlushResponse>(channel) { @Override public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index c376866ad1e..75e54a2b60f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -29,19 +29,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; import static
org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; -/** - * - */ public class RestForceMergeAction extends BaseRestHandler { @Inject @@ -52,13 +50,13 @@ public class RestForceMergeAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { ForceMergeRequest mergeRequest = new ForceMergeRequest(Strings.splitStringByCommaToArray(request.param("index"))); mergeRequest.indicesOptions(IndicesOptions.fromRequest(request, mergeRequest.indicesOptions())); mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); - client.admin().indices().forceMerge(mergeRequest, new RestBuilderListener<ForceMergeResponse>(channel) { + return channel -> client.admin().indices().forceMerge(mergeRequest, new RestBuilderListener<ForceMergeResponse>(channel) { @Override public RestResponse buildResponse(ForceMergeResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 4b58fd0f16c..531f36aa4b3 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -32,21 +32,19 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; import java.util.List; import java.util.Locale; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; -/** - */ public class RestGetAliasesAction extends BaseRestHandler { @Inject @@ -57,7 +55,7 @@ public class RestGetAliasesAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name"); final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases); @@ -65,7 +63,7 @@ public class RestGetAliasesAction extends BaseRestHandler { getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local())); - client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<GetAliasesResponse>(channel) { + return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<GetAliasesResponse>(channel) { @Override public RestResponse
buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception { // empty body, if indices were specified but no aliases were diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java index fba4192d5d4..ac51978a399 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -30,13 +30,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -56,7 +56,7 @@ public class RestGetFieldMappingAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); final String[] fields = Strings.splitStringByCommaToArray(request.param("fields")); @@ -64,26 +64,27 @@ public class RestGetFieldMappingAction extends BaseRestHandler { getMappingsRequest.indices(indices).types(types).fields(fields).includeDefaults(request.paramAsBoolean("include_defaults", false)); getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local())); - client.admin().indices().getFieldMappings(getMappingsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(GetFieldMappingsResponse response, XContentBuilder builder) throws Exception { - Map>> mappingsByIndex = response.mappings(); + return channel -> + client.admin().indices().getFieldMappings(getMappingsRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetFieldMappingsResponse response, XContentBuilder builder) throws Exception { + Map>> mappingsByIndex = response.mappings(); - boolean isPossibleSingleFieldRequest = indices.length == 1 && types.length == 1 && fields.length == 1; - if (isPossibleSingleFieldRequest && isFieldMappingMissingField(mappingsByIndex)) { - return new BytesRestResponse(OK, builder.startObject().endObject()); - } + boolean isPossibleSingleFieldRequest = indices.length == 1 && types.length == 1 && fields.length == 1; + if (isPossibleSingleFieldRequest && isFieldMappingMissingField(mappingsByIndex)) { + return new BytesRestResponse(OK, builder.startObject().endObject()); + } - RestStatus status = OK; - if (mappingsByIndex.isEmpty() && fields.length > 0) { - status = NOT_FOUND; - } - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(status, builder); - } - }); + RestStatus status = OK; 
+                    if (mappingsByIndex.isEmpty() && fields.length > 0) {
+                        status = NOT_FOUND;
+                    }
+                    builder.startObject();
+                    response.toXContent(builder, request);
+                    builder.endObject();
+                    return new BytesRestResponse(status, builder);
+                }
+            });
     }
 
     /**
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java
index ead04532590..0d1752d7368 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.rest.action.admin.indices;
 
 import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
@@ -25,12 +26,14 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.RestToXContentListener;
 
+import java.io.IOException;
+import java.util.Set;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
@@ -46,7 +49,7 @@ public class RestGetIndexTemplateAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final String[] names = Strings.splitStringByCommaToArray(request.param("name"));
 
         GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names);
@@ -55,13 +58,22 @@
         final boolean implicitAll = getIndexTemplatesRequest.names().length == 0;
 
-        client.admin().indices().getTemplates(getIndexTemplatesRequest, new RestToXContentListener<GetIndexTemplatesResponse>(channel) {
-            @Override
-            protected RestStatus getStatus(GetIndexTemplatesResponse response) {
-                boolean templateExists = false == response.getIndexTemplates().isEmpty();
+        return channel ->
+            client.admin()
+                .indices()
+                .getTemplates(getIndexTemplatesRequest, new RestToXContentListener<GetIndexTemplatesResponse>(channel) {
+                    @Override
+                    protected RestStatus getStatus(GetIndexTemplatesResponse response) {
+                        boolean templateExists = false == response.getIndexTemplates().isEmpty();
 
-                return (templateExists || implicitAll) ? OK : NOT_FOUND;
-            }
-        });
+                        return (templateExists || implicitAll) ? OK : NOT_FOUND;
+                    }
+                });
     }
+
+    @Override
+    protected Set<String> responseParams() {
+        return Settings.FORMAT_PARAMS;
+    }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java
index 2ad6f245cf0..b7cf07945bc 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java
@@ -36,7 +36,6 @@ import org.elasticsearch.common.xcontent.ToXContent.Params;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -44,6 +43,7 @@ import org.elasticsearch.rest.action.RestBuilderListener;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.OK;
@@ -64,7 +64,7 @@ public class RestGetIndicesAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         String[] featureParams = request.paramAsStringArray("type", null);
         // Work out if the indices is a list of features
@@ -81,7 +81,8 @@
         getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions()));
         getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local()));
         getIndexRequest.humanReadable(request.paramAsBoolean("human", false));
-        client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener<GetIndexResponse>(channel) {
+        final boolean defaults = request.paramAsBoolean("include_defaults", false);
+        return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener<GetIndexResponse>(channel) {
 
             @Override
             public RestResponse buildResponse(GetIndexResponse response, XContentBuilder builder) throws Exception {
@@ -100,7 +101,7 @@
                         writeMappings(response.mappings().get(index), builder, request);
                         break;
                     case SETTINGS:
-                        writeSettings(response.settings().get(index), builder, request);
+                        writeSettings(response.settings().get(index), builder, request, defaults);
                         break;
                     default:
                         throw new IllegalStateException("feature [" + feature + "] is not valid");
@@ -136,15 +137,15 @@
                 builder.endObject();
             }
 
-            private void writeSettings(Settings settings, XContentBuilder builder, Params params) throws IOException {
-                final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
+            private void writeSettings(Settings settings, XContentBuilder builder, Params params, boolean defaults) throws IOException {
                 builder.startObject(Fields.SETTINGS);
                 settings.toXContent(builder, params);
                 builder.endObject();
-                if (renderDefaults) {
+                if (defaults) {
                     builder.startObject("defaults");
-                    settingsFilter.filter(indexScopedSettings.diff(settings, RestGetIndicesAction.this.settings)).toXContent(builder,
-                        request);
+                    settingsFilter
+                        .filter(indexScopedSettings.diff(settings, RestGetIndicesAction.this.settings))
+                        .toXContent(builder, request);
                     builder.endObject();
                 }
             }
@@ -152,6 +153,11 @@
         });
     }
 
+    @Override
+    protected Set<String> responseParams() {
+        return Settings.FORMAT_PARAMS;
+    }
+
     static class Fields {
         static final String ALIASES = "aliases";
         static final String MAPPINGS = "mappings";
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
index b9c84729640..abe2e529b8d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.rest.action.admin.indices;
 
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -35,18 +34,16 @@ import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.TypeMissingException;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestBuilderListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.OK;
 
-/**
- *
- */
 public class RestGetMappingAction extends BaseRestHandler {
 
     @Inject
@@ -59,14 +56,14 @@ public class RestGetMappingAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         final String[] types = request.paramAsStringArrayOrEmptyIfAll("type");
         GetMappingsRequest getMappingsRequest = new GetMappingsRequest();
         getMappingsRequest.indices(indices).types(types);
         getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions()));
         getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local()));
-        client.admin().indices().getMappings(getMappingsRequest, new RestBuilderListener<GetMappingsResponse>(channel) {
+        return channel -> client.admin().indices().getMappings(getMappingsRequest, new RestBuilderListener<GetMappingsResponse>(channel) {
             @Override
             public RestResponse buildResponse(GetMappingsResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java
index 936a96e035a..10f0564de84 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java
@@ -32,12 +32,13 @@ import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestBuilderListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.OK;
 
@@ -58,7 +59,7 @@ public class RestGetSettingsAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final String[] names = request.paramAsStringArrayOrEmptyIfAll("name");
         final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
         GetSettingsRequest getSettingsRequest = new GetSettingsRequest()
@@ -68,7 +69,7 @@
                 .names(names);
         getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local()));
 
-        client.admin().indices().getSettings(getSettingsRequest, new RestBuilderListener<GetSettingsResponse>(channel) {
+        return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestBuilderListener<GetSettingsResponse>(channel) {
 
             @Override
             public RestResponse buildResponse(GetSettingsResponse getSettingsResponse, XContentBuilder builder) throws Exception {
@@ -94,4 +95,5 @@
             }
         });
     }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestHeadIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestHeadIndexTemplateAction.java
index 3480fbb9afc..bea821e4850 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestHeadIndexTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestHeadIndexTemplateAction.java
@@ -26,19 +26,18 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestResponseListener;
 
+import java.io.IOException;
+import java.util.Set;
+
 import static org.elasticsearch.rest.RestRequest.Method.HEAD;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
 
-/**
- *
- */
 public class RestHeadIndexTemplateAction extends BaseRestHandler {
 
     @Inject
@@ -49,20 +48,29 @@ public class RestHeadIndexTemplateAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(request.param("name"));
         getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local()));
         getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout()));
-        client.admin().indices().getTemplates(getIndexTemplatesRequest, new RestResponseListener<GetIndexTemplatesResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(GetIndexTemplatesResponse getIndexTemplatesResponse) {
-                boolean templateExists = getIndexTemplatesResponse.getIndexTemplates().size() > 0;
-                if (templateExists) {
-                    return new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);
-                } else {
-                    return new BytesRestResponse(NOT_FOUND, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);
-                }
-            }
-        });
+        return channel ->
+            client.admin()
+                .indices()
+                .getTemplates(getIndexTemplatesRequest, new RestResponseListener<GetIndexTemplatesResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(GetIndexTemplatesResponse getIndexTemplatesResponse) {
+                        boolean templateExists = getIndexTemplatesResponse.getIndexTemplates().size() > 0;
+                        if (templateExists) {
+                            return new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);
+                        } else {
+                            return new BytesRestResponse(NOT_FOUND, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);
+                        }
+                    }
+                });
     }
+
+    @Override
+    protected Set<String> responseParams() {
+        return Settings.FORMAT_PARAMS;
+    }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java
index b027aeb8d67..d169ce42825 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java
@@ -19,22 +19,20 @@
 package org.elasticsearch.rest.action.admin.indices;
 
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.DELETE;
 
-/**
- */
 public class RestIndexDeleteAliasesAction extends BaseRestHandler {
 
     @Inject
@@ -45,7 +43,7 @@ public class RestIndexDeleteAliasesAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         final String[] aliases = Strings.splitStringByCommaToArray(request.param("name"));
         IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
@@ -53,6 +51,6 @@
         indicesAliasesRequest.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases));
         indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
 
-        client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<IndicesAliasesResponse>(channel));
+        return channel -> client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java
index f7546bd57db..da8ab809c10 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java
@@ -27,11 +27,11 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
+import java.io.IOException;
 import java.util.Map;
 
 import static org.elasticsearch.rest.RestRequest.Method.POST;
@@ -58,7 +58,7 @@ public class RestIndexPutAliasAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         String alias = request.param("name");
         Map<String, Object> filter = null;
@@ -117,6 +117,6 @@
             aliasAction.filter(filter);
         }
         indicesAliasesRequest.addAliasAction(aliasAction);
-        client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel));
+        return channel -> client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java
index fe8a6a16628..d02e2caf5a3 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java
@@ -32,11 +32,12 @@ import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 
 public class RestIndicesAliasesAction extends BaseRestHandler {
@@ -56,7 +57,7 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
         indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout()));
         indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout()));
@@ -66,6 +67,6 @@
         if (indicesAliasesRequest.getAliasActions().isEmpty()) {
             throw new IllegalArgumentException("No action specified");
         }
-        client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<IndicesAliasesResponse>(channel));
+        return channel -> client.admin().indices().aliases(indicesAliasesRequest, new AcknowledgedRestListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesExistsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesExistsAction.java
index fa62a844356..ed7e656aa0a 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesExistsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesExistsAction.java
@@ -29,19 +29,17 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestResponseListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.HEAD;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
 
-/**
- *
- */
 public class RestIndicesExistsAction extends BaseRestHandler {
 
     @Inject
@@ -51,11 +49,13 @@ public class RestIndicesExistsAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndicesExistsRequest indicesExistsRequest = new IndicesExistsRequest(Strings.splitStringByCommaToArray(request.param("index")));
-        indicesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesExistsRequest.indicesOptions()));
+        IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, indicesExistsRequest.indicesOptions());
+        indicesExistsRequest.expandWilcardsOpen(indicesOptions.expandWildcardsOpen());
+        indicesExistsRequest.expandWilcardsClosed(indicesOptions.expandWildcardsClosed());
         indicesExistsRequest.local(request.paramAsBoolean("local", indicesExistsRequest.local()));
-        client.admin().indices().exists(indicesExistsRequest, new RestResponseListener<IndicesExistsResponse>(channel) {
+        return channel -> client.admin().indices().exists(indicesExistsRequest, new RestResponseListener<IndicesExistsResponse>(channel) {
             @Override
             public RestResponse buildResponse(IndicesExistsResponse response) {
                 if (response.isExists()) {
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java
index db9de980c52..556bb5b1d1c 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java
@@ -29,12 +29,13 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestBuilderListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.OK;
 import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader;
@@ -49,20 +50,21 @@ public class RestIndicesSegmentsAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest(
                 Strings.splitStringByCommaToArray(request.param("index")));
         indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false));
         indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions()));
-        client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener<IndicesSegmentResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                buildBroadcastShardsHeader(builder, request, response);
-                response.toXContent(builder, request);
-                builder.endObject();
-                return new BytesRestResponse(OK, builder);
-            }
-        });
+        return channel ->
+            client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener<IndicesSegmentResponse>(channel) {
+                @Override
+                public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder builder) throws Exception {
+                    builder.startObject();
+                    buildBroadcastShardsHeader(builder, request, response);
+                    response.toXContent(builder, request);
+                    builder.endObject();
+                    return new BytesRestResponse(OK, builder);
+                }
+            });
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java
index 65c0dc8aa45..cf1ef9ce5ee 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java
@@ -30,12 +30,13 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestBuilderListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.OK;
 
@@ -52,21 +53,26 @@ public class RestIndicesShardStoresAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndicesShardStoresRequest indicesShardStoresRequest = new IndicesShardStoresRequest(
                 Strings.splitStringByCommaToArray(request.param("index")));
         if (request.hasParam("status")) {
             indicesShardStoresRequest.shardStatuses(Strings.splitStringByCommaToArray(request.param("status")));
         }
         indicesShardStoresRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesShardStoresRequest.indicesOptions()));
-        client.admin().indices().shardStores(indicesShardStoresRequest, new RestBuilderListener<IndicesShardStoresResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(IndicesShardStoresResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                response.toXContent(builder, request);
-                builder.endObject();
-                return new BytesRestResponse(OK, builder);
-            }
-        });
+        return channel ->
+            client.admin()
+                .indices()
+                .shardStores(indicesShardStoresRequest, new RestBuilderListener<IndicesShardStoresResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(
+                            IndicesShardStoresResponse response,
+                            XContentBuilder builder) throws Exception {
+                        builder.startObject();
+                        response.toXContent(builder, request);
+                        builder.endObject();
+                        return new BytesRestResponse(OK, builder);
+                    }
+                });
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java
index c7dd62688fa..e7336225c5e 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java
@@ -29,12 +29,13 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestBuilderListener;
 
+import java.io.IOException;
+import java.util.Collections;
 import java.util.Set;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
@@ -54,7 +55,7 @@ public class RestIndicesStatsAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
         indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesStatsRequest.indicesOptions()));
         indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
@@ -106,7 +107,7 @@
             indicesStatsRequest.includeSegmentFileSizes(true);
         }
 
-        client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener<IndicesStatsResponse>(channel) {
+        return channel -> client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener<IndicesStatsResponse>(channel) {
             @Override
             public RestResponse buildResponse(IndicesStatsResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
@@ -122,4 +123,12 @@
     public boolean canTripCircuitBreaker() {
         return false;
     }
+
+    private static final Set<String> RESPONSE_PARAMS = Collections.singleton("level");
+
+    @Override
+    protected Set<String> responseParams() {
+        return RESPONSE_PARAMS;
+    }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java
index dd40705769f..23331c7d4a6 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java
@@ -27,14 +27,12 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
-/**
- *
- */
+import java.io.IOException;
+
 public class RestOpenIndexAction extends BaseRestHandler {
 
     @Inject
@@ -45,11 +43,11 @@ public class RestOpenIndexAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index")));
         openIndexRequest.timeout(request.paramAsTime("timeout", openIndexRequest.timeout()));
         openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout()));
         openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions()));
-        client.admin().indices().open(openIndexRequest, new AcknowledgedRestListener<OpenIndexResponse>(channel));
+        return channel -> client.admin().indices().open(openIndexRequest, new AcknowledgedRestListener<OpenIndexResponse>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java
index 3deba4c32f3..aee4eb3a9e2 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
*/ + package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; @@ -23,14 +24,12 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; -/** - * - */ +import java.io.IOException; + public class RestPutIndexTemplateAction extends BaseRestHandler { @Inject @@ -41,7 +40,7 @@ public class RestPutIndexTemplateAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name")); putRequest.template(request.param("template", putRequest.template())); putRequest.order(request.paramAsInt("order", putRequest.order())); @@ -49,6 +48,7 @@ public class RestPutIndexTemplateAction extends BaseRestHandler { putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "")); putRequest.source(request.content()); - client.admin().indices().putTemplate(putRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().putTemplate(putRequest, new AcknowledgedRestListener<>(channel)); } + } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index 3a582d0b0a9..0fa394e6f3d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -27,18 +27,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; + import static org.elasticsearch.client.Requests.putMappingRequest; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; -/** - * - */ public class RestPutMappingAction extends BaseRestHandler { @@ -68,7 +66,7 @@ public class RestPutMappingAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); putMappingRequest.type(request.param("type")); putMappingRequest.source(request.content().utf8ToString()); @@ -76,6 +74,6 @@ public class RestPutMappingAction extends BaseRestHandler { putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", 
putMappingRequest.masterNodeTimeout())); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); - client.admin().indices().putMapping(putMappingRequest, new AcknowledgedRestListener(channel)); + return channel -> client.admin().indices().putMapping(putMappingRequest, new AcknowledgedRestListener(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java index 5dee73606fd..c20c70c8c25 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java @@ -29,12 +29,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; @@ -51,14 +52,14 @@ public class RestRecoveryAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); - client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener(channel) { + return channel -> client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(RecoveryResponse response, XContentBuilder builder) throws Exception { response.detailed(recoveryRequest.detailed()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java index 54088a7ddb2..96afc014e97 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java @@ -29,20 +29,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; import static 
org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; -/** - * - */ public class RestRefreshAction extends BaseRestHandler { @Inject @@ -56,10 +54,10 @@ public class RestRefreshAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); - client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { + return channel -> client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(RefreshResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index 1433bc42571..7c68fea4357 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -20,19 +20,17 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -/** - * - */ +import java.io.IOException; + public class RestRolloverIndexAction extends BaseRestHandler { @Inject @@ -44,7 +42,7 @@ public class RestRolloverIndexAction extends BaseRestHandler { @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RolloverRequest rolloverIndexRequest = new RolloverRequest(request.param("index"), request.param("new_index")); if (request.hasContent()) { rolloverIndexRequest.source(request.content()); @@ -53,6 +51,6 @@ public class RestRolloverIndexAction extends BaseRestHandler { rolloverIndexRequest.timeout(request.paramAsTime("timeout", rolloverIndexRequest.timeout())); rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", rolloverIndexRequest.masterNodeTimeout())); rolloverIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - client.admin().indices().rolloverIndex(rolloverIndexRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().indices().rolloverIndex(rolloverIndexRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java 
b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java index f04c9760a63..fb65825ac6e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java @@ -20,23 +20,19 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; import java.io.IOException; -/** - * - */ public class RestShrinkIndexAction extends BaseRestHandler { @Inject @@ -48,7 +44,7 @@ public class RestShrinkIndexAction extends BaseRestHandler { @SuppressWarnings({"unchecked"}) @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { if (request.param("target") == null) { throw new IllegalArgumentException("no target index"); } @@ -62,7 +58,7 @@ public class RestShrinkIndexAction extends BaseRestHandler { shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - client.admin().indices().shrinkIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { + return channel -> client.admin().indices().shrinkIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { @Override public void addCustomFields(XContentBuilder builder, ShrinkResponse response) throws IOException { response.addCustomFields(builder); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java index 784a588db89..f645c32be71 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java @@ -29,18 +29,16 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -/** - * - */ public class RestSyncedFlushAction extends BaseRestHandler { @Inject @@ -54,11 +52,11 @@ public 
class RestSyncedFlushAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); syncedFlushRequest.indicesOptions(indicesOptions); - client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { + return channel -> client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java index 3877715395c..799ba8b1c62 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestTypesExistsAction.java @@ -28,12 +28,13 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestResponseListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; @@ -52,13 +53,13 @@ public class RestTypesExistsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { TypesExistsRequest typesExistsRequest = new TypesExistsRequest( Strings.splitStringByCommaToArray(request.param("index")), Strings.splitStringByCommaToArray(request.param("type")) ); typesExistsRequest.local(request.paramAsBoolean("local", typesExistsRequest.local())); typesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, typesExistsRequest.indicesOptions())); - client.admin().indices().typesExists(typesExistsRequest, new RestResponseListener(channel) { + return channel -> client.admin().indices().typesExists(typesExistsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(TypesExistsResponse response) throws Exception { if (response.isExists()) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 0c1b535901e..4ba011e698e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -26,11 +26,11 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.AcknowledgedRestListener; +import java.io.IOException; import java.util.Map; import java.util.Set; @@ -38,9 +38,6 @@ import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.client.Requests.updateSettingsRequest; import static org.elasticsearch.common.util.set.Sets.newHashSet; -/** - * - */ public class RestUpdateSettingsAction extends BaseRestHandler { private static final Set VALUES_TO_EXCLUDE = unmodifiableSet(newHashSet( "pretty", @@ -60,7 +57,7 @@ public class RestUpdateSettingsAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index"))); updateSettingsRequest.timeout(request.paramAsTime("timeout", updateSettingsRequest.timeout())); updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); @@ -89,6 +86,12 @@ public class RestUpdateSettingsAction extends BaseRestHandler { } updateSettingsRequest.settings(updateSettings); - client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel)); + return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel)); } + + @Override + protected Set responseParams() { + return Settings.FORMAT_PARAMS; + } + } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java index e0659e1cf52..9882b5bea3d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java @@ -20,9 +20,11 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -31,12 +33,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -44,7 +46,6 @@ import static 
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java
index e0659e1cf52..9882b5bea3d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java
@@ -20,9 +20,11 @@ package org.elasticsearch.rest.action.admin.indices;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest;
 import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
@@ -31,12 +33,12 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestBuilderListener;
 
+import java.io.IOException;
 import java.util.Map;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
@@ -44,7 +46,6 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.RestStatus.OK;
 import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader;
 
-
 public class RestUpgradeAction extends BaseRestHandler {
 
     @Inject
@@ -58,31 +59,35 @@ public class RestUpgradeAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
         if (request.method().equals(RestRequest.Method.GET)) {
-            handleGet(request, channel, client);
+            return handleGet(request, client);
         } else if (request.method().equals(RestRequest.Method.POST)) {
-            handlePost(request, channel, client);
+            return handlePost(request, client);
+        } else {
+            throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]");
         }
     }
 
-    void handleGet(final RestRequest request, RestChannel channel, NodeClient client) {
-        client.admin().indices().prepareUpgradeStatus(Strings.splitStringByCommaToArray(request.param("index")))
-            .execute(new RestBuilderListener<UpgradeStatusResponse>(channel) {
-                @Override
-                public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception {
-                    builder.startObject();
-                    response.toXContent(builder, request);
-                    builder.endObject();
-                    return new BytesRestResponse(OK, builder);
-                }
-            });
+    private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) {
+        UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index")));
+        statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions()));
+        return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener<UpgradeStatusResponse>(channel) {
+            @Override
+            public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception {
+                builder.startObject();
+                response.toXContent(builder, request);
+                builder.endObject();
+                return new BytesRestResponse(OK, builder);
+            }
+        });
     }
 
-    void handlePost(final RestRequest request, RestChannel channel, NodeClient client) {
+    private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) {
         UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index")));
+        upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions()));
         upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false));
-        client.admin().indices().upgrade(upgradeReq, new RestBuilderListener<UpgradeResponse>(channel) {
+        return channel -> client.admin().indices().upgrade(upgradeReq, new RestBuilderListener<UpgradeResponse>(channel) {
             @Override
             public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
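
Worth noting in RestUpgradeAction: with consumers, verb dispatch happens during preparation, and an unsupported method now fails fast with an IllegalArgumentException instead of falling through silently. The same shape written with a switch (a sketch; the handler above uses if/else):

    switch (request.method()) {
        case GET:
            return handleGet(request, client);   // each branch returns a consumer
        case POST:
            return handlePost(request, client);
        default:
            // nothing has been written to any channel yet, so failing here is safe
            throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]");
    }
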
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java
index 7bf2a34ef63..3a1ea5fa7d0 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java
@@ -47,9 +47,6 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.RestStatus.OK;
 import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader;
 
-/**
- *
- */
 public class RestValidateQueryAction extends BaseRestHandler {
 
     private final IndicesQueriesRegistry indicesQueriesRegistry;
@@ -67,58 +64,69 @@ public class RestValidateQueryAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index")));
         validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions()));
         validateQueryRequest.explain(request.paramAsBoolean("explain", false));
-        if (RestActions.hasBodyContent(request)) {
-            try {
-                validateQueryRequest
-                    .query(RestActions.getQueryContent(RestActions.getRestContent(request), indicesQueriesRegistry, parseFieldMatcher));
-            } catch(ParsingException e) {
-                channel.sendResponse(buildErrorResponse(channel.newBuilder(), e.getDetailedMessage(), validateQueryRequest.explain()));
-                return;
-            } catch(Exception e) {
-                channel.sendResponse(buildErrorResponse(channel.newBuilder(), e.getMessage(), validateQueryRequest.explain()));
-                return;
-            }
-        } else {
-            QueryBuilder queryBuilder = RestActions.urlParamsToQueryBuilder(request);
-            if (queryBuilder != null) {
-                validateQueryRequest.query(queryBuilder);
-            }
-        }
         validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
         validateQueryRequest.rewrite(request.paramAsBoolean("rewrite", false));
-        client.admin().indices().validateQuery(validateQueryRequest, new RestBuilderListener<ValidateQueryResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(ValidateQueryResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                builder.field(VALID_FIELD, response.isValid());
-                buildBroadcastShardsHeader(builder, request, response);
-                if (response.getQueryExplanation() != null && !response.getQueryExplanation().isEmpty()) {
-                    builder.startArray(EXPLANATIONS_FIELD);
-                    for (QueryExplanation explanation : response.getQueryExplanation()) {
+
+        Exception bodyParsingException = null;
+        if (RestActions.hasBodyContent(request)) {
+            try {
+                validateQueryRequest.query(
+                    RestActions.getQueryContent(RestActions.getRestContent(request), indicesQueriesRegistry, parseFieldMatcher));
+            } catch (Exception e) {
+                bodyParsingException = e;
+            }
+        } else if (request.hasParam("q")) {
+            QueryBuilder queryBuilder = RestActions.urlParamsToQueryBuilder(request);
+            validateQueryRequest.query(queryBuilder);
+        }
+
+        final Exception finalBodyParsingException = bodyParsingException;
+        return channel -> {
+            if (finalBodyParsingException != null) {
+                if (finalBodyParsingException instanceof ParsingException) {
+                    handleException(validateQueryRequest, ((ParsingException) finalBodyParsingException).getDetailedMessage(), channel);
+                } else {
+                    handleException(validateQueryRequest, finalBodyParsingException.getMessage(), channel);
+                }
+            } else {
+                client.admin().indices().validateQuery(validateQueryRequest, new RestBuilderListener<ValidateQueryResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(ValidateQueryResponse response, XContentBuilder builder) throws Exception {
                         builder.startObject();
-                        if (explanation.getIndex() != null) {
-                            builder.field(INDEX_FIELD, explanation.getIndex());
-                        }
-                        builder.field(VALID_FIELD, explanation.isValid());
-                        if (explanation.getError() != null) {
-                            builder.field(ERROR_FIELD, explanation.getError());
-                        }
-                        if (explanation.getExplanation() != null) {
-                            builder.field(EXPLANATION_FIELD, explanation.getExplanation());
+                        builder.field(VALID_FIELD, response.isValid());
+                        buildBroadcastShardsHeader(builder, request, response);
+                        if (response.getQueryExplanation() != null && !response.getQueryExplanation().isEmpty()) {
+                            builder.startArray(EXPLANATIONS_FIELD);
+                            for (QueryExplanation explanation : response.getQueryExplanation()) {
+                                builder.startObject();
+                                if (explanation.getIndex() != null) {
+                                    builder.field(INDEX_FIELD, explanation.getIndex());
+                                }
+                                builder.field(VALID_FIELD, explanation.isValid());
+                                if (explanation.getError() != null) {
+                                    builder.field(ERROR_FIELD, explanation.getError());
+                                }
+                                if (explanation.getExplanation() != null) {
+                                    builder.field(EXPLANATION_FIELD, explanation.getExplanation());
+                                }
+                                builder.endObject();
+                            }
+                            builder.endArray();
                         }
                         builder.endObject();
+                        return new BytesRestResponse(OK, builder);
                     }
-                    builder.endArray();
-                }
-                builder.endObject();
-                return new BytesRestResponse(OK, builder);
+                });
             }
-        });
+        };
+    }
+
+    private void handleException(final ValidateQueryRequest request, final String message, final RestChannel channel) throws IOException {
+        channel.sendResponse(buildErrorResponse(channel.newBuilder(), message, request.explain()));
     }
 
     private static BytesRestResponse buildErrorResponse(XContentBuilder builder, String error, boolean explain) throws IOException {
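
RestValidateQueryAction shows the awkward case: a body-parsing failure used to be answered on the spot via channel.sendResponse, but during preparation there is no channel yet. The fix is to catch the exception, stash it in an effectively-final local, and replay it once the consumer receives a channel. The pattern in isolation (parseBody is a hypothetical stand-in for the query-parsing step):

    Exception parsingException = null;
    try {
        parseBody(request);                         // hypothetical parse step
    } catch (Exception e) {
        parsingException = e;                       // no channel to answer on yet
    }
    final Exception failure = parsingException;     // lambda capture needs effectively final
    return channel -> {
        if (failure != null) {
            channel.sendResponse(buildErrorResponse(channel.newBuilder(), failure.getMessage(), false));
        } else {
            // normal execution path
        }
    };
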
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java
index 8315e34d08e..7289e9c76e7 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java
@@ -25,49 +25,62 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestStatus;
 
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
 import static org.elasticsearch.rest.action.cat.RestTable.buildHelpWidths;
 import static org.elasticsearch.rest.action.cat.RestTable.pad;
 
-/**
- *
- */
 public abstract class AbstractCatAction extends BaseRestHandler {
 
     public AbstractCatAction(Settings settings) {
         super(settings);
     }
 
-    protected abstract void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client);
+    protected abstract RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client);
 
     protected abstract void documentation(StringBuilder sb);
 
     protected abstract Table getTableWithHeader(final RestRequest request);
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         boolean helpWanted = request.paramAsBoolean("help", false);
         if (helpWanted) {
-            Table table = getTableWithHeader(request);
-            int[] width = buildHelpWidths(table, request);
-            BytesStreamOutput bytesOutput = channel.bytesOutput();
-            UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOutput);
-            for (Table.Cell cell : table.getHeaders()) {
-                // need to do left-align always, so create new cells
-                pad(new Table.Cell(cell.value), width[0], request, out);
-                out.append(" | ");
-                pad(new Table.Cell(cell.attr.containsKey("alias") ? cell.attr.get("alias") : ""), width[1], request, out);
-                out.append(" | ");
-                pad(new Table.Cell(cell.attr.containsKey("desc") ? cell.attr.get("desc") : "not available"), width[2], request, out);
-                out.append("\n");
-            }
-            out.close();
-            channel.sendResponse(new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOutput.bytes()));
+            return channel -> {
+                Table table = getTableWithHeader(request);
+                int[] width = buildHelpWidths(table, request);
+                BytesStreamOutput bytesOutput = channel.bytesOutput();
+                UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOutput);
+                for (Table.Cell cell : table.getHeaders()) {
+                    // need to do left-align always, so create new cells
+                    pad(new Table.Cell(cell.value), width[0], request, out);
+                    out.append(" | ");
+                    pad(new Table.Cell(cell.attr.containsKey("alias") ? cell.attr.get("alias") : ""), width[1], request, out);
+                    out.append(" | ");
+                    pad(new Table.Cell(cell.attr.containsKey("desc") ? cell.attr.get("desc") : "not available"), width[2], request, out);
+                    out.append("\n");
+                }
+                out.close();
+                channel.sendResponse(new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOutput.bytes()));
+            };
         } else {
-            doRequest(request, channel, client);
+            return doCatRequest(request, client);
         }
     }
+
+    static Set<String> RESPONSE_PARAMS =
+        Collections.unmodifiableSet(new HashSet<>(Arrays.asList("format", "h", "v", "ts", "pri", "bytes", "size", "time", "s")));
+
+    @Override
+    protected Set<String> responseParams() {
+        return RESPONSE_PARAMS;
+    }
+
 }
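
For the _cat family the split is the same one level down: doCatRequest prepares the transport request and returns the consumer, while the purely presentational parameters (format, h, v, s, ...) are declared once here as response params. A minimal subclass under the new contract (a hypothetical handler, modeled on the health action later in this patch):

    public class RestExampleCatAction extends AbstractCatAction {
        public RestExampleCatAction(Settings settings) {
            super(settings);
        }

        @Override
        protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
            final ClusterHealthRequest healthRequest = new ClusterHealthRequest();
            return channel -> client.admin().cluster().health(healthRequest,
                new RestResponseListener<ClusterHealthResponse>(channel) {
                    @Override
                    public RestResponse buildResponse(final ClusterHealthResponse health) throws Exception {
                        Table table = getTableWithHeader(request);
                        table.startRow();
                        table.addCell(health.getStatus());
                        table.endRow();
                        return RestTable.buildResponse(table, channel);
                    }
                });
        }

        @Override
        protected void documentation(StringBuilder sb) {
            sb.append("/_cat/example\n");
        }

        @Override
        protected Table getTableWithHeader(final RestRequest request) {
            Table table = new Table();
            table.startHeaders().addCell("status", "desc:cluster status").endHeaders();
            return table;
        }
    }
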
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java
index 82d59784fc9..981b573f0dc 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -37,9 +36,6 @@ import java.util.List;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 
-/**
- *
- */
 public class RestAliasAction extends AbstractCatAction {
 
     @Inject
@@ -51,13 +47,13 @@ public class RestAliasAction extends AbstractCatAction {
     }
 
     @Override
-    protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final GetAliasesRequest getAliasesRequest = request.hasParam("alias") ?
                 new GetAliasesRequest(request.param("alias")) : new GetAliasesRequest();
         getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
 
-        client.admin().indices().getAliases(getAliasesRequest, new RestResponseListener<GetAliasesResponse>(channel) {
+        return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestResponseListener<GetAliasesResponse>(channel) {
             @Override
             public RestResponse buildResponse(GetAliasesResponse response) throws Exception {
                 Table tab = buildTable(request, response);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
index c8138780889..86ff1c44801 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
@@ -34,7 +34,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -59,14 +58,14 @@ public class RestAllocationAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes", "data:true"));
 
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.clear().routingTable(true);
         clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
 
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse state) {
                 NodesStatsRequest statsRequest = new NodesStatsRequest(nodes);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java
index b9cc5011a81..c95b35a989c 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCatAction.java
@@ -24,11 +24,11 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestStatus;
 
+import java.io.IOException;
 import java.util.Set;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
@@ -52,7 +52,8 @@ public class RestCatAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
-        channel.sendResponse(new BytesRestResponse(RestStatus.OK, HELP));
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.OK, HELP));
     }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java
index 4faddc3168c..6af900ce516 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -58,7 +57,7 @@ public class RestCountAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         SearchRequest countRequest = new SearchRequest(indices);
         String source = request.param("source");
@@ -72,7 +71,7 @@ public class RestCountAction extends AbstractCatAction {
                 searchSourceBuilder.query(queryBuilder);
             }
         }
-        client.search(countRequest, new RestResponseListener<SearchResponse>(channel) {
+        return channel -> client.search(countRequest, new RestResponseListener<SearchResponse>(channel) {
             @Override
             public RestResponse buildResponse(SearchResponse countResponse) throws Exception {
                 return RestTable.buildResponse(buildTable(request, countResponse), channel);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java
index fcdad0c3f7e..4ad4d8aca27 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java
@@ -28,7 +28,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -49,14 +48,14 @@ public class RestFielddataAction extends AbstractCatAction {
     }
 
     @Override
-    protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true");
         nodesStatsRequest.clear();
         nodesStatsRequest.indices(true);
         String[] fields = request.paramAsStringArray("fields", null);
         nodesStatsRequest.indices().fieldDataFields(fields == null ? new String[] {"*"} : fields);
 
-        client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<NodesStatsResponse>(channel) {
+        return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<NodesStatsResponse>(channel) {
             @Override
             public RestResponse buildResponse(NodesStatsResponse nodeStatses) throws Exception {
                 return RestTable.buildResponse(buildTable(request, nodeStatses), channel);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java
index cd226e28b56..fc6462bcaa6 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java
@@ -25,7 +25,6 @@ import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -49,10 +48,10 @@ public class RestHealthAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest();
 
-        client.admin().cluster().health(clusterHealthRequest, new RestResponseListener<ClusterHealthResponse>(channel) {
+        return channel -> client.admin().cluster().health(clusterHealthRequest, new RestResponseListener<ClusterHealthResponse>(channel) {
             @Override
             public RestResponse buildResponse(final ClusterHealthResponse health) throws Exception {
                 return RestTable.buildResponse(buildTable(health, request), channel);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
index 782c0ea4441..e56347f16be 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java
@@ -40,7 +40,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -49,7 +48,11 @@ import org.elasticsearch.rest.action.RestResponseListener;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.Locale;
+import java.util.Set;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 
@@ -72,7 +75,7 @@ public class RestIndicesAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.clear().indices(indices).metaData(true);
@@ -81,7 +84,7 @@ public class RestIndicesAction extends AbstractCatAction {
         final IndicesOptions strictExpandIndicesOptions = IndicesOptions.strictExpand();
         clusterStateRequest.indicesOptions(strictExpandIndicesOptions);
 
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse clusterStateResponse) {
                 final ClusterState state = clusterStateResponse.getState();
@@ -123,6 +126,19 @@ public class RestIndicesAction extends AbstractCatAction {
         });
     }
 
+    private static final Set<String> RESPONSE_PARAMS;
+
+    static {
+        final Set<String> responseParams = new HashSet<>(Arrays.asList("local", "health"));
+        responseParams.addAll(AbstractCatAction.RESPONSE_PARAMS);
+        RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams);
+    }
+
+    @Override
+    protected Set<String> responseParams() {
+        return RESPONSE_PARAMS;
+    }
+
     @Override
     protected Table getTableWithHeader(final RestRequest request) {
         Table table = new Table();
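
RestIndicesAction consumes two extra render-time parameters, so it widens the shared _cat set rather than replacing it; any other cat handler with its own display parameter would follow the same merge. A sketch with a hypothetical "pattern" parameter:

    private static final Set<String> RESPONSE_PARAMS;

    static {
        final Set<String> params = new HashSet<>(Collections.singletonList("pattern")); // hypothetical parameter
        params.addAll(AbstractCatAction.RESPONSE_PARAMS);                               // keep format, h, v, s, ...
        RESPONSE_PARAMS = Collections.unmodifiableSet(params);
    }
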
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java
index 5902ba60e57..be2aeaafd4e 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -49,13 +48,13 @@ public class RestMasterAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.clear().nodes(true);
         clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
 
-        client.admin().cluster().state(clusterStateRequest, new RestResponseListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener<ClusterStateResponse>(channel) {
             @Override
             public RestResponse buildResponse(final ClusterStateResponse clusterStateResponse) throws Exception {
                 return RestTable.buildResponse(buildTable(request, clusterStateResponse), channel);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java
index 5ab98316c7c..a5f33d916fd 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java
@@ -31,8 +31,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
-import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -57,13 +56,13 @@ public class RestNodeAttrsAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.clear().nodes(true);
         clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
 
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse clusterStateResponse) {
                 NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
@@ -109,11 +108,7 @@ public class RestNodeAttrsAction extends AbstractCatAction {
                 table.addCell(info == null ? null : info.getProcess().getId());
                 table.addCell(node.getHostName());
                 table.addCell(node.getHostAddress());
-                if (node.getAddress() instanceof InetSocketTransportAddress) {
-                    table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort());
-                } else {
-                    table.addCell("-");
-                }
+                table.addCell(node.getAddress().address().getPort());
                 table.addCell(attrEntry.getKey());
                 table.addCell(attrEntry.getValue());
                 table.endRow();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
index 2c1900feefa..b632448192d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java
@@ -35,7 +35,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.http.HttpInfo;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
@@ -54,7 +53,6 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.monitor.jvm.JvmStats;
 import org.elasticsearch.monitor.os.OsStats;
 import org.elasticsearch.monitor.process.ProcessStats;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -82,13 +80,13 @@ public class RestNodesAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.clear().nodes(true);
         clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
-
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        final boolean fullId = request.paramAsBoolean("full_id", false);
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse clusterStateResponse) {
                 NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
@@ -101,7 +99,8 @@ public class RestNodesAction extends AbstractCatAction {
                 client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<NodesStatsResponse>(channel) {
                     @Override
                     public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception {
-                        return RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), channel);
+                        return RestTable.buildResponse(buildTable(fullId, request, clusterStateResponse, nodesInfoResponse,
+                            nodesStatsResponse), channel);
                     }
                 });
             }
@@ -131,7 +130,8 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio");
         table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory");
         table.addCell("file_desc.current", "default:false;alias:fdc,fileDescriptorCurrent;text-align:right;desc:used file descriptors");
-        table.addCell("file_desc.percent", "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio");
+        table.addCell("file_desc.percent",
+            "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio");
         table.addCell("file_desc.max", "default:false;alias:fdm,fileDescriptorMax;text-align:right;desc:max file descriptors");
 
         table.addCell("cpu", "alias:cpu;text-align:right;desc:recent cpu usage");
@@ -139,7 +139,8 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg");
         table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg");
         table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime");
-        table.addCell("node.role", "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only");
+        table.addCell("node.role",
+            "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only");
         table.addCell("master", "alias:m;desc:*:current master");
         table.addCell("name", "alias:n;desc:node name");
 
@@ -152,9 +153,12 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
 
         table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache");
-        table.addCell("request_cache.evictions", "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions");
-        table.addCell("request_cache.hit_count", "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts");
-        table.addCell("request_cache.miss_count", "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts");
+        table.addCell("request_cache.evictions",
+            "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions");
+        table.addCell("request_cache.hit_count",
+            "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts");
+        table.addCell("request_cache.miss_count",
+            "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts");
 
         table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
         table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
@@ -167,16 +171,20 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
         table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
 
-        table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
+        table.addCell("indexing.delete_current",
+            "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
         table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
         table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
-        table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
+        table.addCell("indexing.index_current",
+            "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
         table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
         table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
-        table.addCell("indexing.index_failed", "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops");
+        table.addCell("indexing.index_failed",
+            "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops");
 
         table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
-        table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
+        table.addCell("merges.current_docs",
+            "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
         table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
         table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
         table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
@@ -187,7 +195,8 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
 
         table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations");
-        table.addCell("script.cache_evictions", "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions");
+        table.addCell("script.cache_evictions",
+            "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions");
 
         table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
         table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
@@ -197,14 +206,19 @@ public class RestNodesAction extends AbstractCatAction {
         table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
         table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
         table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts");
-        table.addCell("search.scroll_time", "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open");
+        table.addCell("search.scroll_time",
+            "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open");
         table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts");
 
         table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
         table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
-        table.addCell("segments.index_writer_memory", "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer");
-        table.addCell("segments.version_map_memory", "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map");
-        table.addCell("segments.fixed_bitset_memory", "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields");
+        table.addCell("segments.index_writer_memory",
+            "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer");
+        table.addCell("segments.version_map_memory",
+            "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map");
+        table.addCell("segments.fixed_bitset_memory",
+            "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types"
+                + " and type filters for types referred in _parent fields");
 
         table.addCell("suggest.current", "alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops");
         table.addCell("suggest.time", "alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest");
@@ -214,8 +228,8 @@ public class RestNodesAction extends AbstractCatAction {
         return table;
     }
 
-    private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
-        boolean fullId = req.paramAsBoolean("full_id", false);
+    private Table buildTable(boolean fullId, RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo,
+                             NodesStatsResponse nodesStats) {
 
         DiscoveryNodes nodes = state.getState().nodes();
         String masterId = nodes.getMasterNodeId();
@@ -237,19 +251,11 @@ public class RestNodesAction extends AbstractCatAction {
             table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
             table.addCell(info == null ? null : info.getProcess().getId());
             table.addCell(node.getHostAddress());
-            if (node.getAddress() instanceof InetSocketTransportAddress) {
-                table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort());
-            } else {
-                table.addCell("-");
-            }
+            table.addCell(node.getAddress().address().getPort());
             final HttpInfo httpInfo = info == null ? null : info.getHttp();
             if (httpInfo != null) {
                 TransportAddress transportAddress = httpInfo.getAddress().publishAddress();
-                if (transportAddress instanceof InetSocketTransportAddress) {
-                    table.addCell(NetworkAddress.format(((InetSocketTransportAddress)transportAddress).address()));
-                } else {
-                    table.addCell(transportAddress.toString());
-                }
+                table.addCell(NetworkAddress.format(transportAddress.address()));
             } else {
                 table.addCell("-");
             }
@@ -265,14 +271,18 @@ public class RestNodesAction extends AbstractCatAction {
             table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent());
             table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal());
             table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors());
-            table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors()));
+            table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(),
+                processStats.getMaxFileDescriptors()));
             table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors());
 
             table.addCell(osStats == null ? null : Short.toString(osStats.getCpu().getPercent()));
             boolean hasLoadAverage = osStats != null && osStats.getCpu().getLoadAverage() != null;
-            table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]));
-            table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]));
-            table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]));
+            table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null :
+                String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]));
+            table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null :
+                String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]));
+            table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null :
+                String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]));
             table.addCell(jvmStats == null ? null : jvmStats.getUptime());
 
             final String roles;
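
The full_id handling in RestNodesAction is the complementary technique to responseParams(): by reading the flag in doCatRequest and passing it into buildTable, the parameter is consumed during preparation and needs no response-param declaration; the lambda simply captures the effectively-final local. The shape in isolation:

    // Consumed up front, captured by the consumer; no responseParams() entry needed.
    final boolean fullId = request.paramAsBoolean("full_id", false);
    return channel -> client.admin().cluster().state(clusterStateRequest,
        new RestActionListener<ClusterStateResponse>(channel) {
            @Override
            public void processResponse(final ClusterStateResponse response) {
                // ... fullId is in scope here without touching the request again
            }
        });
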
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
index 773c6d292b5..aa92164506b 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -47,17 +46,20 @@ public class RestPendingClusterTasksAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest();
         pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout()));
         pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local()));
-        client.admin().cluster().pendingClusterTasks(pendingClusterTasksRequest, new RestResponseListener<PendingClusterTasksResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(PendingClusterTasksResponse pendingClusterTasks) throws Exception {
-                Table tab = buildTable(request, pendingClusterTasks);
-                return RestTable.buildResponse(tab, channel);
-            }
-        });
+        return channel ->
+            client.admin()
+                .cluster()
+                .pendingClusterTasks(pendingClusterTasksRequest, new RestResponseListener<PendingClusterTasksResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(PendingClusterTasksResponse pendingClusterTasks) throws Exception {
+                        Table tab = buildTable(request, pendingClusterTasks);
+                        return RestTable.buildResponse(tab, channel);
+                    }
+                });
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java
index ef8385653f1..b729c4879f3 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java
@@ -31,7 +31,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.PluginInfo;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -54,13 +53,13 @@ public class RestPluginsAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.clear().nodes(true);
         clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
 
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse clusterStateResponse) throws Exception {
                 NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
index b0ab8db8b29..612c2aaf427 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
@@ -32,7 +32,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -65,13 +64,13 @@ public class RestRecoveryAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index")));
         recoveryRequest.detailed(request.paramAsBoolean("detailed", false));
         recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false));
         recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions()));
 
-        client.admin().indices().recoveries(recoveryRequest, new RestResponseListener<RecoveryResponse>(channel) {
+        return channel -> client.admin().indices().recoveries(recoveryRequest, new RestResponseListener<RecoveryResponse>(channel) {
             @Override
             public RestResponse buildResponse(final RecoveryResponse response) throws Exception {
                 return RestTable.buildResponse(buildRecoveryTable(request, response), channel);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java
index 05130504e50..71aab69ad49 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java
@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -45,17 +44,20 @@ public class RestRepositoriesAction extends AbstractCatAction {
     }
 
     @Override
-    protected void doRequest(RestRequest request, RestChannel channel, NodeClient client) {
+    protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) {
         GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest();
         getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local()));
         getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout()));
 
-        client.admin().cluster().getRepositories(getRepositoriesRequest, new RestResponseListener<GetRepositoriesResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(GetRepositoriesResponse getRepositoriesResponse) throws Exception {
-                return RestTable.buildResponse(buildTable(request, getRepositoriesResponse), channel);
-            }
-        });
+        return channel ->
+            client.admin()
+                .cluster()
+                .getRepositories(getRepositoriesRequest, new RestResponseListener<GetRepositoriesResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(GetRepositoriesResponse getRepositoriesResponse) throws Exception {
+                        return RestTable.buildResponse(buildTable(request, getRepositoriesResponse), channel);
+                    }
+                });
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java
index d2b30c49ca9..6b665d957d2 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java
@@ -33,7 +33,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.engine.Segment;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -55,7 +54,7 @@ public class RestSegmentsAction extends AbstractCatAction {
     }
 
     @Override
-    protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
         final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
 
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
@@ -63,7 +62,7 @@ public class RestSegmentsAction extends AbstractCatAction {
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
         clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices);
 
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse clusterStateResponse) {
                 final IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
index 1864dd8b549..f8337fd25c4 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
@@ -36,7 +36,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -63,13 +62,13 @@ public class RestShardsAction extends AbstractCatAction {
     }
 
     @Override
-    public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer
 doCatRequest(final RestRequest request, final NodeClient client) {
         final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
         final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
         clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
         clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
         clusterStateRequest.clear().nodes(true).metaData(true).routingTable(true).indices(indices);
-        client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
+        return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
             @Override
             public void processResponse(final ClusterStateResponse clusterStateResponse) {
                 IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java
index 021b00be24e..ead3696281a 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.Table;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -48,11 +47,12 @@ public class RestSnapshotAction extends AbstractCatAction {
     @Inject
     public RestSnapshotAction(Settings settings, RestController controller) {
         super(settings);
+        controller.registerHandler(GET, "/_cat/snapshots", this);
         controller.registerHandler(GET, "/_cat/snapshots/{repository}", this);
     }
 
     @Override
-    protected void doRequest(final RestRequest request, RestChannel channel, NodeClient client) {
+    protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) {
         GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest()
                 .repository(request.param("repository"))
                 .snapshots(new String[]{GetSnapshotsRequest.ALL_SNAPSHOTS});
@@ -61,12 +61,13 @@ public class RestSnapshotAction extends AbstractCatAction {
 
         getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout()));
 
-        client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestResponseListener<GetSnapshotsResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(GetSnapshotsResponse getSnapshotsResponse) throws Exception {
-                return RestTable.buildResponse(buildTable(request, getSnapshotsResponse), channel);
-            }
-        });
+        return channel ->
+            client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestResponseListener<GetSnapshotsResponse>(channel) {
+                @Override
+                public RestResponse buildResponse(GetSnapshotsResponse getSnapshotsResponse) throws Exception {
+                    return RestTable.buildResponse(buildTable(request, getSnapshotsResponse), channel);
+                }
+            });
     }
 
     @Override
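
The RestTable change that follows is what backs the `s` parameter whitelisted in AbstractCatAction: both the JSON and text renderers now iterate a computed row permutation instead of row indices 0..n, and each sort key may carry an :asc or :desc suffix. The suffix parsing in isolation (illustrative input):

    String key = "ip:desc";                       // e.g. /_cat/nodes?s=ip:desc
    boolean reverse = false;
    if (key.endsWith(":desc")) {
        key = key.substring(0, key.length() - ":desc".length());
        reverse = true;
    } else if (key.endsWith(":asc")) {
        key = key.substring(0, key.length() - ":asc".length());
    }
    // key == "ip", reverse == true
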
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java
index 5fad57da66d..0ab2c86453e 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java
@@ -38,12 +38,14 @@ import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Locale;
+import java.util.Map;
 import java.util.Set;
 
-/**
- */
 public class RestTable {
 
     public static RestResponse buildResponse(Table table, RestChannel channel) throws Exception {
@@ -61,13 +63,13 @@ public class RestTable {
         List<DisplayHeader> displayHeaders = buildDisplayHeaders(table, request);
 
         builder.startArray();
-        for (int row = 0; row < table.getRows().size(); row++) {
+        List<Integer> rowOrder = getRowOrder(table, request);
+        for (Integer row : rowOrder) {
             builder.startObject();
             for (DisplayHeader header : displayHeaders) {
                 builder.field(header.display, renderValue(request, table.getAsMap().get(header.name).get(row).value));
             }
             builder.endObject();
-
         }
         builder.endArray();
         return new BytesRestResponse(RestStatus.OK, builder);
@@ -94,7 +96,10 @@ public class RestTable {
             }
             out.append("\n");
         }
-        for (int row = 0; row < table.getRows().size(); row++) {
+
+        List<Integer> rowOrder = getRowOrder(table, request);
+
+        for (Integer row: rowOrder) {
             for (int col = 0; col < headers.size(); col++) {
                 DisplayHeader header = headers.get(col);
                 boolean isLastColumn = col == lastHeader;
@@ -109,6 +114,38 @@ public class RestTable {
         return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOut.bytes());
     }
 
+    static List<Integer> getRowOrder(Table table, RestRequest request) {
+        String[] columnOrdering = request.paramAsStringArray("s", null);
+
+        List<Integer> rowOrder = new ArrayList<>();
+        for (int i = 0; i < table.getRows().size(); i++) {
+            rowOrder.add(i);
+        }
+
+        if (columnOrdering != null) {
+            Map<String, String> headerAliasMap = table.getAliasMap();
+            List<ColumnOrderElement> ordering = new ArrayList<>();
+            for (int i = 0; i < columnOrdering.length; i++) {
+                String columnHeader = columnOrdering[i];
+                boolean reverse = false;
+                if (columnHeader.endsWith(":desc")) {
+                    columnHeader = columnHeader.substring(0, columnHeader.length() - ":desc".length());
+                    reverse = true;
+                } else if (columnHeader.endsWith(":asc")) {
+                    columnHeader = columnHeader.substring(0, columnHeader.length() - ":asc".length());
+                }
+                if (headerAliasMap.containsKey(columnHeader)) {
+                    ordering.add(new ColumnOrderElement(headerAliasMap.get(columnHeader), reverse));
+                } else {
+                    throw new UnsupportedOperationException(
+                        String.format(Locale.ROOT, "Unable to sort by unknown sort key `%s`", columnHeader));
+                }
+            }
+            Collections.sort(rowOrder, new TableIndexComparator(table, ordering));
+        }
+        return rowOrder;
+    }
+
     static List<DisplayHeader> buildDisplayHeaders(Table table, RestRequest request) {
         List<DisplayHeader> display = new ArrayList<>();
         if (request.hasParam("h")) {
@@ -370,4 +407,71 @@ public class RestTable {
             this.display = display;
         }
     }
+
+    static class TableIndexComparator implements Comparator<Integer> {
+        private final Table table;
+        private final int maxIndex;
+        private final List<ColumnOrderElement> ordering;
+
+        TableIndexComparator(Table table, List<ColumnOrderElement> ordering) {
+            this.table = table;
+            this.maxIndex = table.getRows().size();
+            this.ordering = ordering;
+        }
+
+        private int compareCell(Object o1, Object o2) {
+            if (o1 == null && o2 == null) {
+                return 0;
+            } else if (o1 == null) {
+                return -1;
+            } else if (o2 == null) {
+                return 1;
+            } else {
+                if (o1 instanceof Comparable && o1.getClass().equals(o2.getClass())) {
+                    return ((Comparable) o1).compareTo(o2);
+                } else {
+                    return o1.toString().compareTo(o2.toString());
+                }
+            }
+        }
+
+        @Override
+        public int compare(Integer rowIndex1, Integer rowIndex2) {
+            if (rowIndex1 < maxIndex && rowIndex1 >= 0 && rowIndex2 < maxIndex && rowIndex2 >= 0) {
+                Map<String, List<Table.Cell>> tableMap = table.getAsMap();
+                for (ColumnOrderElement orderingElement : ordering) {
+                    String column = orderingElement.getColumn();
+                    if (tableMap.containsKey(column)) {
+                        int comparison = compareCell(tableMap.get(column).get(rowIndex1).value,
+                            tableMap.get(column).get(rowIndex2).value);
+                        if (comparison != 0) {
+                            return orderingElement.isReversed() ? -1 * comparison : comparison;
+                        }
+                    }
+                }
+                return 0;
+            } else {
+                throw new AssertionError(String.format(Locale.ENGLISH, "Invalid comparison of indices (%s, %s): Table has %s rows.",
+                    rowIndex1, rowIndex2, table.getRows().size()));
+            }
+        }
+    }
+
+    static class ColumnOrderElement {
+        private final String column;
+        private final boolean reverse;
+
+        public ColumnOrderElement(String column, boolean reverse) {
+            this.column = column;
+            this.reverse = reverse;
+        }
+
+        public String getColumn() {
+            return column;
+        }
+
+        public boolean isReversed() {
+            return reverse;
+        }
+    }
 }
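The new sort machinery keys off the `s` request parameter and the table's alias map. A sketch of how a caller might exercise `getRowOrder`; the `FakeRestRequest` builder is assumed from the test framework and is not part of this patch, and the example must live in the same package because `getRowOrder` is package-private:

-------------------------------------
package org.elasticsearch.rest.action.cat;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.elasticsearch.common.Table;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.test.rest.FakeRestRequest; // assumed test helper

public class RowOrderExample {
    public static void main(String[] args) {
        // One column aliased "t", two rows inserted out of ascending order.
        Table table = new Table();
        table.startHeaders();
        table.addCell("total", "alias:t");
        table.endHeaders();
        table.startRow();
        table.addCell(3);
        table.endRow();
        table.startRow();
        table.addCell(1);
        table.endRow();

        // "s=t" sorts ascending by the aliased column; "t:desc" would reverse it.
        Map<String, String> params = new HashMap<>();
        params.put("s", "t");
        RestRequest request = new FakeRestRequest.Builder().withParams(params).build();

        List<Integer> rowOrder = RestTable.getRowOrder(table, request);
        System.out.println(rowOrder); // [1, 0]: the row holding 1 sorts first
    }
}
-------------------------------------

Per `compareCell`, cells of the same `Comparable` type sort natively, mixed or non-comparable cells fall back to string comparison, and an unknown sort key fails the request with an `UnsupportedOperationException`.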
request.paramAsBoolean("detailed", false); @@ -127,11 +144,7 @@ public class RestTasksAction extends AbstractCatAction { // Node information. Note that the node may be null because it has left the cluster between when we got this response and now. table.addCell(fullId ? nodeId : Strings.substring(nodeId, 0, 4)); table.addCell(node == null ? "-" : node.getHostAddress()); - if (node != null && node.getAddress() instanceof InetSocketTransportAddress) { - table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort()); - } else { - table.addCell("-"); - } + table.addCell(node.getAddress().address().getPort()); table.addCell(node == null ? "-" : node.getName()); table.addCell(node == null ? "-" : node.getVersion().toString()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java index 5ee92cbb76c..b62009512a4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -51,14 +50,14 @@ public class RestTemplatesAction extends AbstractCatAction { } @Override - protected void doRequest(final RestRequest request, RestChannel channel, NodeClient client) { + protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { final String matchPattern = request.hasParam("name") ? 
request.param("name") : null; final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().metaData(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); - client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { + return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(ClusterStateResponse clusterStateResponse) throws Exception { return RestTable.buildResponse(buildTable(request, clusterStateResponse, matchPattern), channel); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 6f3c5c11ce0..183a8eefabd 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -34,8 +34,7 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -69,13 +68,13 @@ public class RestThreadPoolAction extends AbstractCatAction { } @Override - public void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); - client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { + return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override public void processResponse(final ClusterStateResponse clusterStateResponse) { NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); @@ -97,6 +96,20 @@ public class RestThreadPoolAction extends AbstractCatAction { }); } + private static final Set RESPONSE_PARAMS; + + static { + final Set responseParams = new HashSet<>(); + responseParams.addAll(AbstractCatAction.RESPONSE_PARAMS); + responseParams.add("thread_pool_patterns"); + RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); + } + + @Override + protected Set responseParams() { + return RESPONSE_PARAMS; + } + @Override protected Table getTableWithHeader(final RestRequest request) { final Table table = new Table(); @@ -182,11 +195,7 @@ public class RestThreadPoolAction extends AbstractCatAction { table.addCell(info == null ? 
null : info.getProcess().getId()); table.addCell(node.getHostName()); table.addCell(node.getHostAddress()); - if (node.getAddress() instanceof InetSocketTransportAddress) { - table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort()); - } else { - table.addCell("-"); - } + table.addCell(node.getAddress().address().getPort()); final ThreadPoolStats.Stats poolStats = entry.getValue(); final ThreadPool.Info poolInfo = poolThreadInfo.get(entry.getKey()); @@ -215,7 +224,7 @@ public class RestThreadPoolAction extends AbstractCatAction { table.addCell(poolStats == null ? null : poolStats.getActive()); table.addCell(poolStats == null ? null : poolStats.getThreads()); table.addCell(poolStats == null ? null : poolStats.getQueue()); - table.addCell(maxQueueSize); + table.addCell(maxQueueSize == null ? -1 : maxQueueSize); table.addCell(poolStats == null ? null : poolStats.getRejected()); table.addCell(poolStats == null ? null : poolStats.getLargest()); table.addCell(poolStats == null ? null : poolStats.getCompleted()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index e87e97ab400..ff603aceefb 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -34,13 +34,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; import static org.elasticsearch.rest.RestStatus.OK; @@ -75,7 +76,7 @@ public class RestBulkAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); String defaultType = request.param("type"); @@ -96,7 +97,7 @@ public class RestBulkAction extends BaseRestHandler { bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex); - client.bulk(bulkRequest, new RestBuilderListener(channel) { + return channel -> client.bulk(bulkRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(BulkResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java index e5ca6f2cadd..86cf0d8d904 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java @@ -32,7 +32,6 @@ import 
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java
index e5ca6f2cadd..86cf0d8d904 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java
@@ -32,7 +32,6 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -40,14 +39,13 @@ import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestBuilderListener;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader;
 import static org.elasticsearch.search.internal.SearchContext.DEFAULT_TERMINATE_AFTER;
 
-/**
- *
- */
 public class RestCountAction extends BaseRestHandler {
 
     private final IndicesQueriesRegistry indicesQueriesRegistry;
@@ -65,7 +63,7 @@ public class RestCountAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         SearchRequest countRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")));
         countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions()));
         SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0);
@@ -93,7 +91,7 @@ public class RestCountAction extends BaseRestHandler {
         } else if (terminateAfter > 0) {
             searchSourceBuilder.terminateAfter(terminateAfter);
         }
-        client.search(countRequest, new RestBuilderListener<SearchResponse>(channel) {
+        return channel -> client.search(countRequest, new RestBuilderListener<SearchResponse>(channel) {
             @Override
             public RestResponse buildResponse(SearchResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
@@ -109,4 +107,5 @@ public class RestCountAction extends BaseRestHandler {
             }
         });
     }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
index 392cff7ffba..80e4efa4ecb 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
@@ -26,17 +26,15 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestStatusToXContentListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.DELETE;
 
-/**
- *
- */
 public class RestDeleteAction extends BaseRestHandler {
 
     @Inject
@@ -46,7 +44,7 @@ public class RestDeleteAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id"));
         deleteRequest.routing(request.param("routing"));
         deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
@@ -60,6 +58,6 @@ public class RestDeleteAction extends BaseRestHandler {
             deleteRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
         }
 
-        client.delete(deleteRequest, new RestStatusToXContentListener<>(channel));
+        return channel -> client.delete(deleteRequest, new RestStatusToXContentListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
index 550abb3e3bb..7206e6b9d5e 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -37,6 +36,8 @@ import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestBuilderListener;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
@@ -50,7 +51,7 @@ public class RestGetAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
         getRequest.operationThreaded(true);
         getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
@@ -60,7 +61,7 @@ public class RestGetAction extends BaseRestHandler {
         getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
         if (request.param("fields") != null) {
             throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
-                "please use [stored_fields] to retrieve stored fields or or [_source] to load the field from _source");
+                "please use [stored_fields] to retrieve stored fields or [_source] to load the field from _source");
         }
         String sField = request.param("stored_fields");
         if (sField != null) {
@@ -75,7 +76,7 @@ public class RestGetAction extends BaseRestHandler {
 
         getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
 
-        client.get(getRequest, new RestBuilderListener<GetResponse>(channel) {
+        return channel -> client.get(getRequest, new RestBuilderListener<GetResponse>(channel) {
             @Override
             public RestResponse buildResponse(GetResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
index 1ecfe317f4e..ed36cc3c89d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
@@ -28,7 +28,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -50,7 +49,7 @@ public class RestGetSourceAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
         getRequest.operationThreaded(true);
         getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
@@ -61,27 +60,25 @@ public class RestGetSourceAction extends BaseRestHandler {
 
         getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
 
-        if (getRequest.fetchSourceContext() != null && !getRequest.fetchSourceContext().fetchSource()) {
-            try {
+        return channel -> {
+            if (getRequest.fetchSourceContext() != null && !getRequest.fetchSourceContext().fetchSource()) {
                 ActionRequestValidationException validationError = new ActionRequestValidationException();
                 validationError.addValidationError("fetching source can not be disabled");
                 channel.sendResponse(new BytesRestResponse(channel, validationError));
-            } catch (IOException e) {
-                logger.error("Failed to send failure response", e);
+            } else {
+                client.get(getRequest, new RestResponseListener<GetResponse>(channel) {
+                    @Override
+                    public RestResponse buildResponse(GetResponse response) throws Exception {
+                        XContentBuilder builder = channel.newBuilder(response.getSourceInternal(), false);
+                        if (response.isSourceEmpty()) { // check if doc source (or doc itself) is missing
+                            return new BytesRestResponse(NOT_FOUND, builder);
+                        } else {
+                            builder.rawValue(response.getSourceInternal());
+                            return new BytesRestResponse(OK, builder);
+                        }
+                    }
+                });
             }
-        }
-
-        client.get(getRequest, new RestResponseListener<GetResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(GetResponse response) throws Exception {
-                XContentBuilder builder = channel.newBuilder(response.getSourceInternal(), false);
-                if (response.isSourceEmpty()) { // check if doc source (or doc itself) is missing
-                    return new BytesRestResponse(NOT_FOUND, builder);
-                } else {
-                    builder.rawValue(response.getSourceInternal());
-                    return new BytesRestResponse(OK, builder);
-                }
-            }
-        });
+        };
     }
 }
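The RestGetSourceAction change illustrates a subtlety of the new contract: validation that previously wrote to the channel immediately must either throw from `prepareRequest` (letting the framework render the error, an assumption about `BaseRestHandler` behavior) or move inside the returned consumer, where a channel is finally in scope. A hedged sketch of the two failure paths for a hypothetical handler:

-------------------------------------
@Override
public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
    // Fail fast: no channel is needed; the surrounding framework is assumed
    // to turn the exception into an error response.
    if (request.param("id") == null) {
        throw new IllegalArgumentException("id is required");
    }
    final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
    getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));

    return channel -> {
        // Fail late: the channel is in scope, so the handler answers directly,
        // mirroring RestGetSourceAction's disabled-source check above.
        if (getRequest.fetchSourceContext() != null && getRequest.fetchSourceContext().fetchSource() == false) {
            ActionRequestValidationException validationError = new ActionRequestValidationException();
            validationError.addValidationError("fetching source can not be disabled");
            channel.sendResponse(new BytesRestResponse(channel, validationError));
        } else {
            // Listener choice simplified for the sketch.
            client.get(getRequest, new RestToXContentListener<>(channel));
        }
    };
}
-------------------------------------

Note that the hand-rolled `try`/`catch` around `sendResponse` disappears: any `IOException` now propagates to the caller instead of being logged and swallowed.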
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java
index ad2f826e584..0adb4fcfd28 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestHeadAction.java
@@ -28,12 +28,13 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.RestResponseListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.HEAD;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
@@ -82,7 +83,7 @@ public abstract class RestHeadAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
         getRequest.operationThreaded(true);
         getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
@@ -94,7 +95,7 @@ public abstract class RestHeadAction extends BaseRestHandler {
         getRequest.storedFields(Strings.EMPTY_ARRAY);
         // TODO we can also just return the document size as Content-Length
 
-        client.get(getRequest, new RestResponseListener<GetResponse>(channel) {
+        return channel -> client.get(getRequest, new RestResponseListener<GetResponse>(channel) {
             @Override
             public RestResponse buildResponse(GetResponse response) {
                 if (!response.isExists()) {
@@ -107,4 +108,5 @@ public abstract class RestHeadAction extends BaseRestHandler {
             }
         });
     }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
index 6c9723b5b93..82b10361153 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
@@ -24,11 +24,8 @@ import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
@@ -38,11 +35,7 @@ import java.io.IOException;
 
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.RestRequest.Method.PUT;
-import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
 
-/**
- *
- */
 public class RestIndexAction extends BaseRestHandler {
 
     @Inject
@@ -62,14 +55,14 @@ public class RestIndexAction extends BaseRestHandler {
         }
 
         @Override
-        public void handleRequest(RestRequest request, RestChannel channel, final NodeClient client) {
+        public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException {
             request.params().put("op_type", "create");
-            RestIndexAction.this.handleRequest(request, channel, client);
+            return RestIndexAction.this.prepareRequest(request, client);
         }
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id"));
         indexRequest.routing(request.param("routing"));
         indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
@@ -84,24 +77,16 @@ public class RestIndexAction extends BaseRestHandler {
         indexRequest.version(RestActions.parseVersion(request));
         indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType()));
         String sOpType = request.param("op_type");
-        if (sOpType != null) {
-            try {
-                indexRequest.opType(IndexRequest.OpType.fromString(sOpType));
-            } catch (IllegalArgumentException eia){
-                try {
-                    XContentBuilder builder = channel.newErrorBuilder();
-                    channel.sendResponse(
-                        new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", eia.getMessage()).endObject()));
-                } catch (IOException e1) {
-                    logger.warn("Failed to send response", e1);
-                    return;
-                }
-            }
-        }
         String waitForActiveShards = request.param("wait_for_active_shards");
         if (waitForActiveShards != null) {
             indexRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
         }
-        client.index(indexRequest, new RestStatusToXContentListener<>(channel, r -> r.getLocation(indexRequest.routing())));
+        if (sOpType != null) {
+            indexRequest.opType(sOpType);
+        }
+
+        return channel ->
+            client.index(indexRequest, new RestStatusToXContentListener<>(channel, r -> r.getLocation(indexRequest.routing())));
     }
+
 }
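The RestIndexAction hunk shows the error-handling win concretely: `op_type` validation now rides on `IndexRequest#opType(String)`, which rejects anything other than `index` or `create` with an `IllegalArgumentException`, and under the new contract that exception leaves `prepareRequest` before any channel work begins. A hedged illustration:

-------------------------------------
IndexRequest indexRequest = new IndexRequest("twitter", "tweet", "1");
indexRequest.opType("create");      // fine: the write fails if the doc already exists
try {
    indexRequest.opType("upsert");  // not a supported op_type
} catch (IllegalArgumentException e) {
    // Previously RestIndexAction caught this and hand-built a BAD_REQUEST
    // response; now the surrounding framework is assumed to render it.
}
-------------------------------------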
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java
index 07d221fed8e..df9af7f42ad 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java
@@ -20,19 +20,19 @@ package org.elasticsearch.rest.action.document;
 
 import org.elasticsearch.action.get.MultiGetRequest;
-import org.elasticsearch.action.get.MultiGetResponse;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 
@@ -54,7 +54,7 @@ public class RestMultiGetAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         MultiGetRequest multiGetRequest = new MultiGetRequest();
         multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
         multiGetRequest.preference(request.param("preference"));
@@ -71,8 +71,8 @@ public class RestMultiGetAction extends BaseRestHandler {
         FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);
 
         multiGetRequest.add(request.param("index"), request.param("type"), sFields, defaultFetchSource,
-                request.param("routing"), RestActions.getRestContent(request), allowExplicitIndex);
+            request.param("routing"), RestActions.getRestContent(request), allowExplicitIndex);
 
-        client.multiGet(multiGetRequest, new RestToXContentListener<MultiGetResponse>(channel));
+        return channel -> client.multiGet(multiGetRequest, new RestToXContentListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
index dab23e8df35..fd087e7e763 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java
@@ -27,12 +27,13 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestToXContentListener;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 
@@ -50,7 +51,7 @@ public class RestMultiTermVectorsAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest();
         TermVectorsRequest template = new TermVectorsRequest();
         template.index(request.param("index"));
@@ -59,6 +60,7 @@ public class RestMultiTermVectorsAction extends BaseRestHandler {
         multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param("ids")));
         multiTermVectorsRequest.add(template, RestActions.getRestContent(request));
 
-        client.multiTermVectors(multiTermVectorsRequest, new RestToXContentListener(channel));
+        return channel -> client.multiTermVectors(multiTermVectorsRequest, new RestToXContentListener(channel));
     }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java
index b64219215a9..3724e34d33d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java
@@ -20,7 +20,6 @@ package org.elasticsearch.rest.action.document;
 
 import org.elasticsearch.action.termvectors.TermVectorsRequest;
-import org.elasticsearch.action.termvectors.TermVectorsResponse;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
@@ -29,12 +28,12 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestToXContentListener;
 
+import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -63,17 +62,17 @@ public class RestTermVectorsAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         TermVectorsRequest termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("type"), request.param("id"));
         if (RestActions.hasBodyContent(request)) {
             try (XContentParser parser = XContentFactory.xContent(RestActions.guessBodyContentType(request))
-                     .createParser(RestActions.getRestContent(request))){
+                     .createParser(RestActions.getRestContent(request))) {
                 TermVectorsRequest.parseRequest(termVectorsRequest, parser);
             }
         }
         readURIParameters(termVectorsRequest, request);
 
-        client.termVectors(termVectorsRequest, new RestToXContentListener<TermVectorsResponse>(channel));
+        return channel -> client.termVectors(termVectorsRequest, new RestToXContentListener<>(channel));
     }
 
     public static void readURIParameters(TermVectorsRequest termVectorsRequest, RestRequest request) {
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
index 91f71e72498..e0211ccec2f 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
@@ -30,17 +30,16 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.rest.action.RestStatusToXContentListener;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
+import java.io.IOException;
+
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 
-/**
- */
 public class RestUpdateAction extends BaseRestHandler {
 
     private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
@@ -52,7 +51,7 @@ public class RestUpdateAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id"));
         updateRequest.routing(request.param("routing"));
         updateRequest.parent(request.param("parent"));
@@ -108,6 +107,7 @@ public class RestUpdateAction extends BaseRestHandler {
             }
         }
 
-        client.update(updateRequest, new RestStatusToXContentListener<>(channel, r -> r.getLocation(updateRequest.routing())));
+        return channel ->
+            client.update(updateRequest, new RestStatusToXContentListener<>(channel, r -> r.getLocation(updateRequest.routing())));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java
index 593b55b8b75..f9430ea0de4 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java
@@ -24,11 +24,12 @@ import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
+import java.io.IOException;
+
 public class RestDeletePipelineAction extends BaseRestHandler {
 
     @Inject
@@ -38,10 +39,10 @@ public class RestDeletePipelineAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         DeletePipelineRequest request = new DeletePipelineRequest(restRequest.param("id"));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
-        client.admin().cluster().deletePipeline(request, new AcknowledgedRestListener<>(channel));
+        return channel -> client.admin().cluster().deletePipeline(request, new AcknowledgedRestListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java
index 308fb146c30..7124bb35b31 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java
@@ -25,11 +25,12 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestStatusToXContentListener;
 
+import java.io.IOException;
+
 public class RestGetPipelineAction extends BaseRestHandler {
 
     @Inject
@@ -40,9 +41,9 @@ public class RestGetPipelineAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         GetPipelineRequest request = new GetPipelineRequest(Strings.splitStringByCommaToArray(restRequest.param("id")));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
-        client.admin().cluster().getPipeline(request, new RestStatusToXContentListener<>(channel));
+        return channel -> client.admin().cluster().getPipeline(request, new RestStatusToXContentListener<>(channel));
    }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java
index b6d34a6c254..d4388f6bea0 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java
@@ -24,12 +24,13 @@ import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 import org.elasticsearch.rest.action.RestActions;
 
+import java.io.IOException;
+
 public class RestPutPipelineAction extends BaseRestHandler {
 
     @Inject
@@ -40,11 +41,11 @@ public class RestPutPipelineAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), RestActions.getRestContent(restRequest));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
-        client.admin().cluster().putPipeline(request, new AcknowledgedRestListener<>(channel));
+        return channel -> client.admin().cluster().putPipeline(request, new AcknowledgedRestListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java
index a51bdf5fef2..07a58459582 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java
@@ -24,13 +24,13 @@ import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
-import org.elasticsearch.rest.action.RestStatusToXContentListener;
 import org.elasticsearch.rest.action.RestToXContentListener;
 
+import java.io.IOException;
+
 public class RestSimulatePipelineAction extends BaseRestHandler {
 
     @Inject
@@ -43,10 +43,10 @@ public class RestSimulatePipelineAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(RestRequest restRequest, RestChannel channel, NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         SimulatePipelineRequest request = new SimulatePipelineRequest(RestActions.getRestContent(restRequest));
         request.setId(restRequest.param("id"));
         request.setVerbose(restRequest.paramAsBoolean("verbose", false));
-        client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel));
+        return channel -> client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
index 4c8f84a2223..bc49bffe447 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java
@@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
@@ -52,7 +51,7 @@ public class RestClearScrollAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        String scrollIds = request.param("scroll_id");
        ClearScrollRequest clearRequest = new ClearScrollRequest();
        clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds)));
@@ -68,7 +67,7 @@ public class RestClearScrollAction extends BaseRestHandler {
             }
         }
 
-        client.clearScroll(clearRequest, new RestStatusToXContentListener<ClearScrollResponse>(channel));
+        return channel -> client.clearScroll(clearRequest, new RestStatusToXContentListener<ClearScrollResponse>(channel));
     }
 
     public static String[] splitScrollIds(String scrollIds) {
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java
index 597bf3db615..16de4fb9acd 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java
@@ -33,7 +33,6 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
@@ -64,7 +63,7 @@ public class RestExplainAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final ExplainRequest explainRequest = new ExplainRequest(request.param("index"), request.param("type"), request.param("id"));
         explainRequest.parent(request.param("parent"));
         explainRequest.routing(request.param("routing"));
@@ -92,7 +91,7 @@ public class RestExplainAction extends BaseRestHandler {
 
         explainRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
 
-        client.explain(explainRequest, new RestBuilderListener<ExplainResponse>(channel) {
+        return channel -> client.explain(explainRequest, new RestBuilderListener<ExplainResponse>(channel) {
             @Override
             public RestResponse buildResponse(ExplainResponse response, XContentBuilder builder) throws Exception {
                 builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
index a54e40be731..f55758c4e90 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
@@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
@@ -52,8 +51,6 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 
-/**
- */
 public class RestMultiSearchAction extends BaseRestHandler {
 
     private final boolean allowExplicitIndex;
@@ -75,9 +72,9 @@ public class RestMultiSearchAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex, searchRequestParsers, parseFieldMatcher);
-        client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel));
+        return channel -> client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel));
     }
 
     /**
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index 8acfc72dfe1..c68f2802e83 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -33,7 +33,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
@@ -56,9 +55,6 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion;
 
-/**
- *
- */
 public class RestSearchAction extends BaseRestHandler {
 
     private final SearchRequestParsers searchRequestParsers;
@@ -76,11 +72,12 @@ public class RestSearchAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         SearchRequest searchRequest = new SearchRequest();
         BytesReference restContent = RestActions.hasBodyContent(request) ? RestActions.getRestContent(request) : null;
         parseSearchRequest(searchRequest, request, searchRequestParsers, parseFieldMatcher, restContent);
-        client.search(searchRequest, new RestStatusToXContentListener<>(channel));
+
+        return channel -> client.search(searchRequest, new RestStatusToXContentListener<>(channel));
     }
 
     /**
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
index 9b9ddd3a93d..abe27bd41fb 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.rest.action.search;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -31,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestActions;
@@ -57,7 +55,7 @@ public class RestSearchScrollAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) {
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         String scrollId = request.param("scroll_id");
         SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
         searchScrollRequest.scrollId(scrollId);
@@ -78,7 +76,7 @@ public class RestSearchScrollAction extends BaseRestHandler {
                 buildFromContent(RestActions.getRestContent(request), searchScrollRequest);
             }
         }
-        client.searchScroll(searchScrollRequest, new RestStatusToXContentListener<SearchResponse>(channel));
+        return channel -> client.searchScroll(searchScrollRequest, new RestStatusToXContentListener<>(channel));
     }
 
     public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) {
org.elasticsearch.search.suggest.SuggestBuilder; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; @@ -67,7 +66,7 @@ public class RestSuggestAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final SearchRequest searchRequest = new SearchRequest( Strings.splitStringByCommaToArray(request.param("index")), new SearchSourceBuilder()); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); @@ -82,7 +81,7 @@ public class RestSuggestAction extends BaseRestHandler { } searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); - client.search(searchRequest, new RestBuilderListener<SearchResponse>(channel) { + return channel -> client.search(searchRequest, new RestBuilderListener<SearchResponse>(channel) { @Override public RestResponse buildResponse(SearchResponse response, XContentBuilder builder) throws Exception { RestStatus restStatus = RestStatus.status(response.getSuccessfulShards(),
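All of the Rest*Action hunks above apply the same mechanical refactoring: `handleRequest(request, channel, client)`, which executed against the channel immediately, becomes `prepareRequest(request, client)`, which performs all request parsing up front and returns a `RestChannelConsumer` that is invoked only once a channel is bound. A minimal sketch of a handler written against the new contract (the class name and `/_example` endpoint are invented for illustration; the handler, listener, and consumer types are the ones used in the hunks above):

```java
import java.io.IOException;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestStatusToXContentListener;

public class RestExampleAction extends BaseRestHandler {

    public RestExampleAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.GET, "/_example", this);
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        // Parse eagerly: a malformed request fails here, before any channel exists.
        final SearchRequest searchRequest = new SearchRequest(
                Strings.splitStringByCommaToArray(request.param("index")));
        // The returned lambda captures only the fully built request.
        return channel -> client.search(searchRequest, new RestStatusToXContentListener<>(channel));
    }
}
```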
diff --git a/core/src/main/java/org/elasticsearch/script/CompiledScript.java b/core/src/main/java/org/elasticsearch/script/CompiledScript.java index ec2ad4192a2..818971f0f89 100644 --- a/core/src/main/java/org/elasticsearch/script/CompiledScript.java +++ b/core/src/main/java/org/elasticsearch/script/CompiledScript.java @@ -24,7 +24,7 @@ package org.elasticsearch.script; */ public class CompiledScript { - private final ScriptService.ScriptType type; + private final ScriptType type; private final String name; private final String lang; private final Object compiled; @@ -36,7 +36,7 @@ public class CompiledScript { * @param lang The language of the script to be executed. * @param compiled The compiled script Object that is executable. */ - public CompiledScript(ScriptService.ScriptType type, String name, String lang, Object compiled) { + public CompiledScript(ScriptType type, String name, String lang, Object compiled) { this.type = type; this.name = name; this.lang = lang; @@ -47,7 +47,7 @@ public class CompiledScript { * Method to get the type of language. * @return The type of language the script was compiled in. */ - public ScriptService.ScriptType type() { + public ScriptType type() { return type; } diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java index 94abb43bc06..59ac7610178 100644 --- a/core/src/main/java/org/elasticsearch/script/Script.java +++ b/core/src/main/java/org/elasticsearch/script/Script.java @@ -19,273 +19,600 @@ package org.elasticsearch.script; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Objects; /** - * Script holds all the parameters necessary to compile or find in cache and then execute a script. + * Script represents user-defined input that can be used to + * compile and execute a script from the {@link ScriptService} + * based on the {@link ScriptType}. */ public final class Script implements ToXContent, Writeable { - public static final ScriptType DEFAULT_TYPE = ScriptType.INLINE; + /** + * The name of the default scripting language. + */ public static final String DEFAULT_SCRIPT_LANG = "painless"; - private String script; - private ScriptType type; - @Nullable private String lang; - @Nullable private Map<String, Object> params; - @Nullable private XContentType contentType; + /** + * The name of the default template language. + */ + public static final String DEFAULT_TEMPLATE_LANG = "mustache"; /** - * Constructor for simple inline script. The script will have no lang or params set. - * - * @param script The inline script to execute. + * The default {@link ScriptType}. */ - public Script(String script) { - this(script, ScriptType.INLINE, null, null); - } - - public Script(String script, ScriptType type, String lang, @Nullable Map<String, Object> params) { - this(script, type, lang, params, null); - } + public static final ScriptType DEFAULT_SCRIPT_TYPE = ScriptType.INLINE; /** - * Constructor for Script. - * - * @param script The cache key of the script to be compiled/executed. For inline scripts this is the actual - * script source code. For indexed scripts this is the id used in the request. For on file - * scripts this is the file name. - * @param type The type of script -- dynamic, stored, or file. - * @param lang The language of the script to be compiled/executed. - * @param params The map of parameters the script will be executed with. - * @param contentType The {@link XContentType} of the script.
Only relevant for inline scripts that have not been - * defined as a plain string, but as json or yaml content. This class needs this information - * when serializing the script back to xcontent. + * Compiler option for {@link XContentType} used for templates. */ - @SuppressWarnings("unchecked") - public Script(String script, ScriptType type, String lang, @Nullable Map<String, Object> params, - @Nullable XContentType contentType) { - if (contentType != null && type != ScriptType.INLINE) { - throw new IllegalArgumentException("The parameter contentType only makes sense for inline scripts"); + public static final String CONTENT_TYPE_OPTION = "content_type"; + + /** + * Standard {@link ParseField} for outer level of script queries. + */ + public static final ParseField SCRIPT_PARSE_FIELD = new ParseField("script"); + + /** + * Standard {@link ParseField} for lang on the inner level. + */ + public static final ParseField LANG_PARSE_FIELD = new ParseField("lang"); + + /** + * Standard {@link ParseField} for options on the inner level. + */ + public static final ParseField OPTIONS_PARSE_FIELD = new ParseField("options"); + + /** + * Standard {@link ParseField} for params on the inner level. + */ + public static final ParseField PARAMS_PARSE_FIELD = new ParseField("params"); + + /** + * Unreleased version used to gate the non-null members read/write format of {@link Script}. + */ + public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099); + + /** + * Helper class used by {@link ObjectParser} to store mutable {@link Script} variables and then + * construct an immutable {@link Script} object based on parsed XContent. + */ + private static final class Builder { + private ScriptType type; + private String lang; + private String idOrCode; + private Map<String, String> options; + private Map<String, Object> params; + + private Builder() { + // This cannot default to an empty map because options are potentially added at multiple points. + this.options = new HashMap<>(); + this.params = Collections.emptyMap(); } - this.script = Objects.requireNonNull(script); + + /** + * Since inline scripts can accept code rather than just an id, they must also be able + * to handle template parsing, hence the need for custom parsing code. Templates can + * consist of either a {@link String} or a JSON object. If a JSON object is discovered + * then the content type option must also be saved as a compiler option. + */ + private void setInline(XContentParser parser) { + try { + if (type != null) { + throwOnlyOneOfType(); + } + + type = ScriptType.INLINE; + + if (parser.currentToken() == Token.START_OBJECT) { + XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); + idOrCode = builder.copyCurrentStructure(parser).bytes().utf8ToString(); + options.put(CONTENT_TYPE_OPTION, parser.contentType().mediaType()); + } else { + idOrCode = parser.text(); + } + } catch (IOException exception) { + throw new UncheckedIOException(exception); + } + } + + /** + * Set both the id and the type of the stored script. + */ + private void setStored(String idOrCode) { + if (type != null) { + throwOnlyOneOfType(); + } + + type = ScriptType.STORED; + this.idOrCode = idOrCode; + } + + /** + * Set both the id and the type of the file script. + */ + private void setFile(String idOrCode) { + if (type != null) { + throwOnlyOneOfType(); + } + + type = ScriptType.FILE; + this.idOrCode = idOrCode; + }
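Because `setInline` above accepts either a bare string or a whole JSON object (a template, whose content type is then recorded as a compiler option), a short sketch of driving the `parse(...)` entry points declared just below may help. The JSON input and harness class are invented for illustration; `createParser(String)` reflects the XContent API as of this change (later versions add a registry argument), so treat the exact plumbing as an assumption:

```java
import java.io.IOException;

import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.script.Script;

class ScriptParseExample {
    static Script parseInline() throws IOException {
        // "inline", "lang", and "params" are the ParseFields declared above.
        String json = "{ \"inline\": \"doc.popularity * params.boost\","
                + " \"lang\": \"painless\", \"params\": { \"boost\": 2 } }";
        XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(json);
        // Uses the default-language overload; stored queries may pass a legacy default instead.
        return Script.parse(parser, ParseFieldMatcher.STRICT);
    }
}
```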
+ + /** + * Helper method to throw an exception if more than one type of {@link Script} is specified. + */ + private void throwOnlyOneOfType() { + throw new IllegalArgumentException("must only use one of [" + + ScriptType.INLINE.getParseField().getPreferredName() + ", " + + ScriptType.STORED.getParseField().getPreferredName() + ", " + + ScriptType.FILE.getParseField().getPreferredName() + "]" + + " when specifying a script"); + } + + private void setLang(String lang) { + this.lang = lang; + } + + /** + * Options may have already been added if an inline template was specified. + * Merges the user-defined compiler options into the internal compiler options. + */ + private void setOptions(Map<String, String> options) { + this.options.putAll(options); + } + + private void setParams(Map<String, Object> params) { + this.params = params; + } + + /** + * Validates the parameters and creates a {@link Script}. + * @param defaultLang The default lang is not a compile-time constant and must be provided + * at run-time this way in case a legacy default language is used from + * previously stored queries. + */ + private Script build(String defaultLang) { + if (type == null) { + throw new IllegalArgumentException( + "must specify either code for an [" + ScriptType.INLINE.getParseField().getPreferredName() + "] script " + + "or an id for a [" + ScriptType.STORED.getParseField().getPreferredName() + "] script " + + "or [" + ScriptType.FILE.getParseField().getPreferredName() + "] script"); + } + + if (idOrCode == null) { + throw new IllegalArgumentException("must specify an id or code for a script"); + } + + if (options.size() > 1 || options.size() == 1 && options.get(CONTENT_TYPE_OPTION) == null) { + throw new IllegalArgumentException("illegal compiler options [" + options + "] specified"); + } + + return new Script(type, this.lang == null ? defaultLang : this.lang, idOrCode, options, params); + } + } + + private static final ObjectParser<Builder, ParseFieldMatcherSupplier> PARSER = new ObjectParser<>("script", Builder::new); + + static { + // Defines the fields necessary to parse a Script as XContent using an ObjectParser. + PARSER.declareField(Builder::setInline, parser -> parser, ScriptType.INLINE.getParseField(), ValueType.OBJECT_OR_STRING); + PARSER.declareString(Builder::setStored, ScriptType.STORED.getParseField()); + PARSER.declareString(Builder::setFile, ScriptType.FILE.getParseField()); + PARSER.declareString(Builder::setLang, LANG_PARSE_FIELD); + PARSER.declareField(Builder::setOptions, XContentParser::mapStrings, OPTIONS_PARSE_FIELD, ValueType.OBJECT); + PARSER.declareField(Builder::setParams, XContentParser::map, PARAMS_PARSE_FIELD, ValueType.OBJECT); + } + + /** + * Convenience method to call {@link Script#parse(XContentParser, ParseFieldMatcher, String)} + * using the default scripting language. + */ + public static Script parse(XContentParser parser, ParseFieldMatcher matcher) throws IOException { + return parse(parser, matcher, DEFAULT_SCRIPT_LANG); + } + + /** + * Convenience method to call {@link Script#parse(XContentParser, ParseFieldMatcher, String)} using the + * {@link ParseFieldMatcher} and scripting language provided by the {@link QueryParseContext}. + */ + public static Script parse(XContentParser parser, QueryParseContext context) throws IOException { + return parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); + } + + /** + * This will parse XContent into a {@link Script}.
The following formats can be parsed: + * + * The simple format defaults to an {@link ScriptType#INLINE} with no compiler options or user-defined params: + * + * Example: + * {@code + * "return Math.log(doc.popularity) * 100;" + * } + * + * The complex format, where {@link ScriptType} and idOrCode are required, while lang, options and params are optional: + * + * {@code + * { + * "<type>" : "<idOrCode>", + * "lang" : "<lang>", + * "options" : { + * "option0" : "<option0>", + * "option1" : "<option1>", + * ... + * }, + * "params" : { + * "param0" : "<param0>", + * "param1" : "<param1>", + * ... + * } + * } + * } + * + * Example: + * {@code + * { + * "inline" : "return Math.log(doc.popularity) * params.multiplier", + * "lang" : "painless", + * "params" : { + * "multiplier" : 100.0 + * } + * } + * } + * + * This also handles templates in a special way. If a complexly formatted query is specified as another complex + * JSON object, the query is assumed to be a template, and the format will be preserved. + * + * {@code + * { + * "inline" : { "query" : ... }, + * "lang" : "<lang>", + * "options" : { + * "option0" : "<option0>", + * "option1" : "<option1>", + * ... + * }, + * "params" : { + * "param0" : "<param0>", + * "param1" : "<param1>", + * ... + * } + * } + * } + * + * @param parser The {@link XContentParser} to be used. + * @param matcher The {@link ParseFieldMatcher} to be used. + * @param defaultLang The default language to use if no language is specified. The default language isn't necessarily + * the one defined by {@link Script#DEFAULT_SCRIPT_LANG} due to backwards compatibility requirements + * related to stored queries using previously default languages. + * @return The parsed {@link Script}. + */ + public static Script parse(XContentParser parser, ParseFieldMatcher matcher, String defaultLang) throws IOException { + Objects.requireNonNull(defaultLang); + + Token token = parser.currentToken(); + + if (token == null) { + token = parser.nextToken(); + } + + if (token == Token.VALUE_STRING) { + return new Script(ScriptType.INLINE, defaultLang, parser.text(), Collections.emptyMap()); + } + + return PARSER.apply(parser, () -> matcher).build(defaultLang); + } + + private final ScriptType type; + private final String lang; + private final String idOrCode; + private final Map<String, String> options; + private final Map<String, Object> params; + + /** + * Constructor for simple script using the default language and default type. + * @param idOrCode The id or code to use dependent on the default script type. + */ + public Script(String idOrCode) { + this(DEFAULT_SCRIPT_TYPE, DEFAULT_SCRIPT_LANG, idOrCode, Collections.emptyMap(), Collections.emptyMap()); + } + + /** + * Constructor for a script that does not need to use compiler options. + * @param type The {@link ScriptType}. + * @param lang The lang for this {@link Script}. + * @param idOrCode The id for this {@link Script} if the {@link ScriptType} is {@link ScriptType#FILE} or {@link ScriptType#STORED}. + * The code for this {@link Script} if the {@link ScriptType} is {@link ScriptType#INLINE}. + * @param params The user-defined params to be bound for script execution. + */ + public Script(ScriptType type, String lang, String idOrCode, Map<String, Object> params) { + this(type, lang, idOrCode, Collections.emptyMap(), params); + }
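To make the constructor chain above concrete, here is a sketch that builds equivalent inline scripts three ways; every class, constant, and constructor referenced is declared in this file:

```java
import java.util.Collections;

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

class ScriptConstructionExample {
    static void example() {
        // Shorthand: default type (INLINE) and default lang ("painless").
        Script simple = new Script("return Math.log(doc.popularity) * 100;");

        // The same script with type, lang, and params spelled out.
        Script explicit = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG,
                "return Math.log(doc.popularity) * 100;", Collections.emptyMap());

        // Parameterized variant, matching the javadoc example above.
        Script parameterized = new Script(ScriptType.INLINE, "painless",
                "return Math.log(doc.popularity) * params.multiplier",
                Collections.singletonMap("multiplier", 100.0));
    }
}
```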
+ + /** + * Constructor for a script that requires the use of compiler options. + * @param type The {@link ScriptType}. + * @param lang The lang for this {@link Script}. + * @param idOrCode The id for this {@link Script} if the {@link ScriptType} is {@link ScriptType#FILE} or {@link ScriptType#STORED}. + * The code for this {@link Script} if the {@link ScriptType} is {@link ScriptType#INLINE}. + * @param options The options to be passed to the compiler for use at compile-time. + * @param params The user-defined params to be bound for script execution. + */ + public Script(ScriptType type, String lang, String idOrCode, Map<String, String> options, Map<String, Object> params) { + this.idOrCode = Objects.requireNonNull(idOrCode); this.type = Objects.requireNonNull(type); - this.lang = lang == null ? DEFAULT_SCRIPT_LANG : lang; - this.params = (Map<String, Object>) params; - this.contentType = contentType; + this.lang = Objects.requireNonNull(lang); + this.options = Collections.unmodifiableMap(Objects.requireNonNull(options)); + this.params = Collections.unmodifiableMap(Objects.requireNonNull(params)); + + if (type != ScriptType.INLINE && !options.isEmpty()) { + throw new IllegalArgumentException( + "Compiler options [" + options + "] cannot be specified at runtime for [" + type + "] scripts."); + } } + /** + * Creates a {@link Script} read from an input stream. + */ public Script(StreamInput in) throws IOException { - script = in.readString(); - if (in.readBoolean()) { - type = ScriptType.readFrom(in); - } - lang = in.readOptionalString(); - params = in.readMap(); - if (in.readBoolean()) { - contentType = XContentType.readFrom(in); + // Version 5.1+ requires all Script members to be non-null and supports the potential + // for more options than just XContentType. Reorders the read-in contents to be in + // the same order as the constructor. + if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) { + this.type = ScriptType.readFrom(in); + this.lang = in.readString(); + this.idOrCode = in.readString(); + @SuppressWarnings("unchecked") + Map<String, String> options = (Map<String, String>)(Map)in.readMap(); + this.options = options; + this.params = in.readMap(); + // Prior to version 5.1 the script members are read in certain cases as optional and given + // default values when necessary. Also the only option supported is for XContentType. + } else { + String idOrCode = in.readString(); + ScriptType type; + + if (in.readBoolean()) { + type = ScriptType.readFrom(in); + } else { + type = DEFAULT_SCRIPT_TYPE; + } + + String lang = in.readOptionalString(); + + if (lang == null) { + lang = DEFAULT_SCRIPT_LANG; + } + + Map<String, Object> params = in.readMap(); + + if (params == null) { + params = new HashMap<>(); + } + + Map<String, String> options = new HashMap<>(); + + if (in.readBoolean()) { + XContentType contentType = XContentType.readFrom(in); + options.put(CONTENT_TYPE_OPTION, contentType.mediaType()); + } + + this.type = type; + this.lang = lang; + this.idOrCode = idOrCode; + this.options = options; + this.params = params; } }
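The two branches of the stream constructor above, mirrored by `writeTo` below, implement a version-gated wire format. A test-style sketch of the compatibility contract follows; `BytesStreamOutput`, `setVersion`, and `Version.V_5_0_0` are standard Elasticsearch stream utilities rather than part of this diff, so treat the exact calls as an assumption:

```java
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.script.Script;

class ScriptWireFormatExample {
    static Script roundTrip(Script script) throws IOException {
        // Pretend the remote node is pre-5.1: both sides then agree on the
        // legacy optional-members format handled by the else branches.
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(Version.V_5_0_0);
        script.writeTo(out);

        StreamInput in = out.bytes().streamInput();
        in.setVersion(Version.V_5_0_0);
        return new Script(in);
    }
}
```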
@Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(script); - boolean hasType = type != null; - out.writeBoolean(hasType); - if (hasType) { - ScriptType.writeTo(type, out); - } - out.writeOptionalString(lang); - out.writeMap(params); - boolean hasContentType = contentType != null; - out.writeBoolean(hasContentType); - if (hasContentType) { - XContentType.writeTo(contentType, out); + // Version 5.1+ requires all Script members to be non-null and supports the potential + // for more options than just XContentType. Reorders the written-out contents to be in + // the same order as the constructor. + if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) { + type.writeTo(out); + out.writeString(lang); + out.writeString(idOrCode); + @SuppressWarnings("unchecked") + Map<String, Object> options = (Map<String, Object>)(Map)this.options; + out.writeMap(options); + out.writeMap(params); + // Prior to version 5.1 the Script members were possibly written as optional or null, though this is no longer + // necessary since Script members cannot be null anymore, and there is no case where a null value wasn't equivalent + // to its default value when actually compiling/executing a script. This means there are no backwards compatibility issues, + // and consistency is now enforced. Also the only supported compiler option was XContentType. + } else { + out.writeString(idOrCode); + out.writeBoolean(true); + type.writeTo(out); + out.writeBoolean(true); + out.writeString(lang); + out.writeMap(params.isEmpty() ? null : params); + + if (options.containsKey(CONTENT_TYPE_OPTION)) { + XContentType contentType = XContentType.fromMediaTypeOrFormat(options.get(CONTENT_TYPE_OPTION)); + out.writeBoolean(true); + contentType.writeTo(out); + } else { + out.writeBoolean(false); + } } } /** - * Method for getting the script. - * @return The cache key of the script to be compiled/executed. For dynamic scripts this is the actual - * script source code. For indexed scripts this is the id used in the request. For on disk scripts - * this is the file name. - */ - public String getScript() { - return script; - } - - /** - * Method for getting the type. + * This will build scripts into the following XContent structure: * - * @return The type of script -- inline, stored, or file. + * {@code + * { + * "<type>" : "<idOrCode>", + * "lang" : "<lang>", + * "options" : { + * "option0" : "<option0>", + * "option1" : "<option1>", + * ... + * }, + * "params" : { + * "param0" : "<param0>", + * "param1" : "<param1>", + * ... + * } + * } + * } + * + * Example: + * {@code + * { + * "inline" : "return Math.log(doc.popularity) * params.multiplier;", + * "lang" : "painless", + * "params" : { + * "multiplier" : 100.0 + * } + * } + * } + * + * Note that options and params will only be included if any have been specified. + * + * This also handles templates in a special way. If the {@link Script#CONTENT_TYPE_OPTION} option + * is provided and the {@link ScriptType#INLINE} is specified, then the template will be preserved as a raw field. + * + * {@code + * { + * "inline" : { "query" : ... }, + * "lang" : "<lang>", + * "options" : { + * "option0" : "<option0>", + * "option1" : "<option1>", + * ... + * }, + * "params" : { + * "param0" : "<param0>", + * "param1" : "<param1>", + * ... + * } + * } + * } + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException { + builder.startObject(); + + String contentType = options.get(CONTENT_TYPE_OPTION); + + if (type == ScriptType.INLINE && contentType != null && builder.contentType().mediaType().equals(contentType)) { + builder.rawField(type.getParseField().getPreferredName(), new BytesArray(idOrCode)); + } else { + builder.field(type.getParseField().getPreferredName(), idOrCode); + } + + builder.field(LANG_PARSE_FIELD.getPreferredName(), lang); + + if (!options.isEmpty()) { + builder.field(OPTIONS_PARSE_FIELD.getPreferredName(), options); + } + + if (!params.isEmpty()) { + builder.field(PARAMS_PARSE_FIELD.getPreferredName(), params); + } + + builder.endObject(); + + return builder; + } + + /** + * @return The id for this {@link Script} if the {@link ScriptType} is {@link ScriptType#FILE} or {@link ScriptType#STORED}.
+ * The code for this {@link Script} if the {@link ScriptType} is {@link ScriptType#INLINE}. + */ + public String getIdOrCode() { + return idOrCode; + } + + /** + * @return The {@link ScriptType} for this {@link Script}. */ public ScriptType getType() { return type; } /** - * Method for getting language. - * - * @return The language of the script to be compiled/executed. + * @return The language for this {@link Script}. */ public String getLang() { return lang; } /** - * Method for getting the parameters. - * - * @return The map of parameters the script will be executed with. + * @return The map of compiler options for this {@link Script}. + */ + public Map getOptions() { + return options; + } + + /** + * @return The map of user-defined params for this {@link Script}. */ public Map getParams() { return params; } - /** - * @return The content type of the script if it is an inline script and the script has been defined as json - * or yaml content instead of a plain string. - */ - public XContentType getContentType() { - return contentType; - } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException { - if (type == null) { - return builder.value(script); - } - builder.startObject(); - if (type == ScriptType.INLINE && contentType != null && builder.contentType() == contentType) { - builder.rawField(type.getParseField().getPreferredName(), new BytesArray(script)); - } else { - builder.field(type.getParseField().getPreferredName(), script); - } - if (lang != null) { - builder.field(ScriptField.LANG.getPreferredName(), lang); - } - if (params != null) { - builder.field(ScriptField.PARAMS.getPreferredName(), params); - } - builder.endObject(); - return builder; - } + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; - public static Script parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { - return parse(parser, parseFieldMatcher, null); - } + Script script = (Script)o; + + if (type != script.type) return false; + if (!lang.equals(script.lang)) return false; + if (!idOrCode.equals(script.idOrCode)) return false; + if (!options.equals(script.options)) return false; + return params.equals(script.params); - public static Script parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, @Nullable String lang) throws IOException { - XContentParser.Token token = parser.currentToken(); - // If the parser hasn't yet been pushed to the first token, do it now - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.VALUE_STRING) { - return new Script(parser.text(), ScriptType.INLINE, lang, null); - } - if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("expected a string value or an object, but found [{}] instead", token); - } - String script = null; - ScriptType type = null; - Map params = null; - XContentType contentType = null; - String cfn = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - cfn = parser.currentName(); - } else if (parseFieldMatcher.match(cfn, ScriptType.INLINE.getParseField())) { - type = ScriptType.INLINE; - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { - contentType = parser.contentType(); - XContentBuilder builder = XContentFactory.contentBuilder(contentType); - script = builder.copyCurrentStructure(parser).bytes().utf8ToString(); - } 
else { - script = parser.text(); - } - } else if (parseFieldMatcher.match(cfn, ScriptType.FILE.getParseField())) { - type = ScriptType.FILE; - if (token == XContentParser.Token.VALUE_STRING) { - script = parser.text(); - } else { - throw new ElasticsearchParseException("expected a string value for field [{}], but found [{}]", cfn, token); - } - } else if (parseFieldMatcher.match(cfn, ScriptType.STORED.getParseField())) { - type = ScriptType.STORED; - if (token == XContentParser.Token.VALUE_STRING) { - script = parser.text(); - } else { - throw new ElasticsearchParseException("expected a string value for field [{}], but found [{}]", cfn, token); - } - } else if (parseFieldMatcher.match(cfn, ScriptField.LANG)) { - if (token == XContentParser.Token.VALUE_STRING) { - lang = parser.text(); - } else { - throw new ElasticsearchParseException("expected a string value for field [{}], but found [{}]", cfn, token); - } - } else if (parseFieldMatcher.match(cfn, ScriptField.PARAMS)) { - if (token == XContentParser.Token.START_OBJECT) { - params = parser.map(); - } else { - throw new ElasticsearchParseException("expected an object for field [{}], but found [{}]", cfn, token); - } - } else { - throw new ElasticsearchParseException("unexpected field [{}]", cfn); - } - } - if (script == null) { - throw new ElasticsearchParseException("expected one of [{}], [{}] or [{}] fields, but found none", - ScriptType.INLINE.getParseField() .getPreferredName(), ScriptType.FILE.getParseField().getPreferredName(), - ScriptType.STORED.getParseField() .getPreferredName()); - } - return new Script(script, type, lang, params, contentType); } @Override public int hashCode() { - return Objects.hash(lang, params, script, type, contentType); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - Script other = (Script) obj; - - return Objects.equals(lang, other.lang) && - Objects.equals(params, other.params) && - Objects.equals(script, other.script) && - Objects.equals(type, other.type) && - Objects.equals(contentType, other.contentType); + int result = type.hashCode(); + result = 31 * result + lang.hashCode(); + result = 31 * result + idOrCode.hashCode(); + result = 31 * result + options.hashCode(); + result = 31 * result + params.hashCode(); + return result; } @Override public String toString() { - return "[script: " + script + ", type: " + type.getParseField().getPreferredName() + ", lang: " - + lang + ", params: " + params + "]"; + return "Script{" + + "type=" + type + + ", lang='" + lang + '\'' + + ", idOrCode='" + idOrCode + '\'' + + ", options=" + options + + ", params=" + params + + '}'; } - - public interface ScriptField { - ParseField SCRIPT = new ParseField("script"); - ParseField LANG = new ParseField("lang"); - ParseField PARAMS = new ParseField("params"); - } - } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java b/core/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java index b4ed91faeba..2b7feeb8d7f 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java @@ -19,8 +19,6 @@ package org.elasticsearch.script; -import org.elasticsearch.common.settings.Settings; - import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -81,8 +79,8 @@ public final class ScriptContextRegistry { } private static Set reservedScriptContexts() { - 
Set reserved = new HashSet<>(ScriptService.ScriptType.values().length + ScriptContext.Standard.values().length); - for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { + Set reserved = new HashSet<>(ScriptType.values().length + ScriptContext.Standard.values().length); + for (ScriptType scriptType : ScriptType.values()) { reserved.add(scriptType.toString()); } for (ScriptContext.Standard scriptContext : ScriptContext.Standard.values()) { diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java index 55a931d8d57..1760dbaa3ad 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java @@ -25,9 +25,6 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.Closeable; import java.util.Map; -/** - * - */ public interface ScriptEngineService extends Closeable { String getType(); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 979bffb4bcc..84855da2f94 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -131,7 +131,7 @@ public final class ScriptMetaData implements MetaData.Custom { @Override public EnumSet context() { - return MetaData.API_AND_GATEWAY; + return MetaData.ALL_CONTEXTS; } @Override diff --git a/core/src/main/java/org/elasticsearch/script/ScriptModes.java b/core/src/main/java/org/elasticsearch/script/ScriptModes.java index 46ab2a44d21..15393948d66 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptModes.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptModes.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.ScriptService.ScriptType; import java.util.Collections; import java.util.HashMap; @@ -73,7 +72,7 @@ public class ScriptModes { } static String sourceKey(ScriptType scriptType) { - return SCRIPT_SETTINGS_PREFIX + "." + scriptType.getScriptType(); + return SCRIPT_SETTINGS_PREFIX + "." 
+ scriptType.getName(); } static String getGlobalKey(String lang, ScriptType scriptType) { diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 9e61f39378e..1dc1cda0ada 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesReference; @@ -46,8 +45,6 @@ import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -70,7 +67,6 @@ import java.nio.file.Path; import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentMap; @@ -136,7 +132,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust TimeValue cacheExpire = SCRIPT_CACHE_EXPIRE_SETTING.get(settings); if (cacheExpire.getNanos() != 0) { - cacheBuilder.setExpireAfterAccess(cacheExpire.nanos()); + cacheBuilder.setExpireAfterAccess(cacheExpire); } logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire); @@ -249,7 +245,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust long timePassed = now - lastInlineCompileTime; lastInlineCompileTime = now; - scriptsPerMinCounter += ((double) timePassed) * compilesAllowedPerNano; + scriptsPerMinCounter += (timePassed) * compilesAllowedPerNano; // It's been over the time limit anyway, readjust the bucket to be level if (scriptsPerMinCounter > totalCompilesPerMinute) { @@ -278,9 +274,9 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust String lang = script.getLang(); ScriptType type = script.getType(); - //script.getScript() could return either a name or code for a script, + //script.getIdOrCode() could return either a name or code for a script, //but we check for a file script name first and an indexed script name second - String name = script.getScript(); + String name = script.getIdOrCode(); if (logger.isTraceEnabled()) { logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, type, name); @@ -300,8 +296,8 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust return compiledScript; } - //script.getScript() will be code if the script type is inline - String code = script.getScript(); + //script.getIdOrCode() will be code if the script type is inline + String code = script.getIdOrCode(); if (type == ScriptType.STORED) { //The look up for an indexed script must be done every time in case @@ -472,23 +468,31 @@ public class ScriptService extends AbstractComponent implements 
Closeable, Clust /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(Script script, ScriptContext scriptContext, Map params) { - return executable(compile(script, scriptContext, params), script.getParams()); + public ExecutableScript executable(Script script, ScriptContext scriptContext) { + return executable(compile(script, scriptContext, script.getOptions()), script.getParams()); } /** * Executes a previously compiled script provided as an argument */ - public ExecutableScript executable(CompiledScript compiledScript, Map vars) { - return getScriptEngineServiceForLang(compiledScript.lang()).executable(compiledScript, vars); + public ExecutableScript executable(CompiledScript compiledScript, Map params) { + return getScriptEngineServiceForLang(compiledScript.lang()).executable(compiledScript, params); } /** * Compiles (or retrieves from cache) and executes the provided search script */ - public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext, Map params) { - CompiledScript compiledScript = compile(script, scriptContext, params); - return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); + public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { + CompiledScript compiledScript = compile(script, scriptContext, script.getOptions()); + return search(lookup, compiledScript, script.getParams()); + } + + /** + * Binds provided parameters to a compiled script returning a + * {@link SearchScript} ready for execution + */ + public SearchScript search(SearchLookup lookup, CompiledScript compiledScript, Map params) { + return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, params); } private boolean isAnyScriptContextEnabled(String lang, ScriptType scriptType) { @@ -624,68 +628,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust } - /** - * The type of a script, more specifically where it gets loaded from: - * - provided dynamically at request time - * - loaded from an index - * - loaded from file - */ - public enum ScriptType { - - INLINE(0, "inline", "inline", false), - STORED(1, "id", "stored", false), - FILE(2, "file", "file", true); - - private final int val; - private final ParseField parseField; - private final String scriptType; - private final boolean defaultScriptEnabled; - - public static ScriptType readFrom(StreamInput in) throws IOException { - int scriptTypeVal = in.readVInt(); - for (ScriptType type : values()) { - if (type.val == scriptTypeVal) { - return type; - } - } - throw new IllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + "] expected one of [" - + INLINE.val + "," + FILE.val + "," + STORED.val + "]"); - } - - public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{ - if (scriptType != null) { - out.writeVInt(scriptType.val); - } else { - out.writeVInt(INLINE.val); //Default to inline - } - } - - ScriptType(int val, String name, String scriptType, boolean defaultScriptEnabled) { - this.val = val; - this.parseField = new ParseField(name); - this.scriptType = scriptType; - this.defaultScriptEnabled = defaultScriptEnabled; - } - - public ParseField getParseField() { - return parseField; - } - - public boolean getDefaultScriptEnabled() { - return defaultScriptEnabled; - } - - public String getScriptType() { - return scriptType; - } - - @Override - public 
String toString() { - return name().toLowerCase(Locale.ROOT); - } - - } - private static final class CacheKey { final String lang; final String name; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java index 1cb2b356245..27a6ad04a70 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java @@ -43,14 +43,14 @@ public class ScriptSettings { @Deprecated public static final String LEGACY_SCRIPT_SETTING = "script.legacy.default_lang"; - private static final Map> SCRIPT_TYPE_SETTING_MAP; + private static final Map> SCRIPT_TYPE_SETTING_MAP; static { - Map> scriptTypeSettingMap = new HashMap<>(); - for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { + Map> scriptTypeSettingMap = new HashMap<>(); + for (ScriptType scriptType : ScriptType.values()) { scriptTypeSettingMap.put(scriptType, Setting.boolSetting( ScriptModes.sourceKey(scriptType), - scriptType.getDefaultScriptEnabled(), + scriptType.isDefaultEnabled(), Property.NodeScope)); } SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap); @@ -84,7 +84,7 @@ public class ScriptSettings { return scriptContextSettingMap; } - private static List> languageSettings(Map> scriptTypeSettingMap, + private static List> languageSettings(Map> scriptTypeSettingMap, Map> scriptContextSettingMap, ScriptEngineRegistry scriptEngineRegistry, ScriptContextRegistry scriptContextRegistry) { @@ -96,13 +96,13 @@ public class ScriptSettings { continue; } final String language = scriptEngineRegistry.getLanguage(scriptEngineService); - for (final ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { + for (final ScriptType scriptType : ScriptType.values()) { // Top level, like "script.engine.groovy.inline" final boolean defaultNonFileScriptMode = scriptEngineRegistry.getDefaultInlineScriptEnableds().get(language); boolean defaultLangAndType = defaultNonFileScriptMode; // Files are treated differently because they are never default-deny - if (ScriptService.ScriptType.FILE == scriptType) { - defaultLangAndType = ScriptService.ScriptType.FILE.getDefaultScriptEnabled(); + if (ScriptType.FILE == scriptType) { + defaultLangAndType = ScriptType.FILE.isDefaultEnabled(); } final boolean defaultIfNothingSet = defaultLangAndType; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptType.java b/core/src/main/java/org/elasticsearch/script/ScriptType.java new file mode 100644 index 00000000000..01592b57aad --- /dev/null +++ b/core/src/main/java/org/elasticsearch/script/ScriptType.java @@ -0,0 +1,139 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * ScriptType represents the way a script is stored and retrieved from the {@link ScriptService}. + * It's also used by {@link ScriptSettings} and {@link ScriptModes} to determine whether or not + * a {@link Script} is allowed to be executed based on both default and user-defined settings. + */ +public enum ScriptType implements Writeable { + + /** + * INLINE scripts are specified in numerous queries and compiled on-the-fly. + * They will be cached based on the lang and code of the script. + * They are turned off by default because most languages are insecure + * (Groovy and others), but can be overridden by the specific {@link ScriptEngineService} + * if the language is naturally secure (Painless, Mustache, and Expressions). + */ + INLINE ( 0 , new ParseField("inline") , false ), + + /** + * STORED scripts are saved as part of the {@link org.elasticsearch.cluster.ClusterState} + * based on user requests. They will be cached when they are first used in a query. + * They are turned off by default because most languages are insecure + * (Groovy and others), but can be overridden by the specific {@link ScriptEngineService} + * if the language is naturally secure (Painless, Mustache, and Expressions). + */ + STORED ( 1 , new ParseField("stored", "id") , false ), + + /** + * FILE scripts are loaded from disk either on start-up or on-the-fly depending on + * user-defined settings. They will be compiled and cached as soon as they are loaded + * from disk. They are turned on by default as they should always be safe to execute. + */ + FILE ( 2 , new ParseField("file") , true ); + + /** + * Reads an int from the input stream and converts it to a {@link ScriptType}. + * @return The ScriptType read from the stream. Throws an {@link IllegalStateException} + * if no ScriptType is found based on the id. + */ + public static ScriptType readFrom(StreamInput in) throws IOException { + int id = in.readVInt(); + + if (FILE.id == id) { + return FILE; + } else if (STORED.id == id) { + return STORED; + } else if (INLINE.id == id) { + return INLINE; + } else { + throw new IllegalStateException("Error reading ScriptType id [" + id + "] from stream, expected one of [" + + FILE.id + " [" + FILE.parseField.getPreferredName() + "], " + + STORED.id + " [" + STORED.parseField.getPreferredName() + "], " + + INLINE.id + " [" + INLINE.parseField.getPreferredName() + "]]"); + } + } + + private final int id; + private final ParseField parseField; + private final boolean defaultEnabled; + + /** + * Standard constructor. + * @param id A unique identifier for a type that can be read/written to a stream. + * @param parseField Specifies the name used to parse input from queries. + * @param defaultEnabled Whether or not a {@link ScriptType} can be run by default. + */ + ScriptType(int id, ParseField parseField, boolean defaultEnabled) { + this.id = id; + this.parseField = parseField; + this.defaultEnabled = defaultEnabled; + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(id); + } + + /** + * @return The unique id for this {@link ScriptType}. + */ + public int getId() { + return id; + }
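A small sketch of the enum's contract as defined above: a stable numeric id on the wire and ParseField-derived naming for queries (the harness class is invented; the stream helpers are the same ones assumed in the earlier round-trip sketch):

```java
import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.script.ScriptType;

class ScriptTypeWireExample {
    static void example() throws IOException {
        // writeTo encodes the enum as its numeric id (a vint).
        BytesStreamOutput out = new BytesStreamOutput();
        ScriptType.STORED.writeTo(out);

        // readFrom maps the id back, throwing IllegalStateException otherwise.
        StreamInput in = out.bytes().streamInput();
        assert ScriptType.readFrom(in) == ScriptType.STORED;

        // "stored" is the preferred name; "id" is the deprecated parse alias.
        assert ScriptType.STORED.getName().equals("stored");
    }
}
```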
+ + /** + * @return The unique name for this {@link ScriptType} based on the {@link ParseField}. + */ + public String getName() { + return parseField.getPreferredName(); + } + + /** + * @return Specifies the name used to parse input from queries. + */ + public ParseField getParseField() { + return parseField; + } + + /** + * @return Whether or not a {@link ScriptType} can be run by default. Note + * this can potentially be overridden by any {@link ScriptEngineService}. + */ + public boolean isDefaultEnabled() { + return defaultEnabled; + } + + /** + * @return The same as calling {@link #getName()}. + */ + @Override + public String toString() { + return getName(); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 9aab786aa34..8c4981b5541 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.search; -import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; @@ -28,6 +27,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Counter; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -48,10 +48,10 @@ import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchPhase; @@ -65,7 +65,6 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhaseExecutionException; import org.elasticsearch.search.query.QuerySearchResult; @@ -75,6 +74,7 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -89,7 +89,6 @@ final class DefaultSearchContext extends SearchContext { private final Counter timeEstimateCounter; private SearchType searchType; private final Engine.Searcher engineSearcher; - private final ScriptService scriptService; private final BigArrays bigArrays; private final IndexShard indexShard; private final IndexService indexService; @@ -115,8 +114,11 @@ final class DefaultSearchContext extends SearchContext { private Float minimumScore; private boolean trackScores = false; // when sorting, track scores as well...
private FieldDoc searchAfter; + private boolean lowLevelCancellation; // filter for sliced scroll private SliceBuilder sliceBuilder; + private SearchTask task; + /** * The original query as sent by the user without the types and aliases @@ -138,7 +140,6 @@ final class DefaultSearchContext extends SearchContext { private SearchContextHighlight highlight; private SuggestionSearchContext suggest; private List rescore; - private SearchLookup searchLookup; private volatile long keepAlive; private final long originNanoTime = System.nanoTime(); private volatile long lastAccessTime = -1; @@ -150,9 +151,9 @@ final class DefaultSearchContext extends SearchContext { private FetchPhase fetchPhase; DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, - IndexService indexService, IndexShard indexShard, ScriptService scriptService, - BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout, - FetchPhase fetchPhase) { + IndexService indexService, IndexShard indexShard, + BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout, + FetchPhase fetchPhase) { super(parseFieldMatcher); this.id = id; this.request = request; @@ -160,7 +161,6 @@ final class DefaultSearchContext extends SearchContext { this.searchType = request.searchType(); this.shardTarget = shardTarget; this.engineSearcher = engineSearcher; - this.scriptService = scriptService; // SearchContexts use a BigArrays that can circuit break this.bigArrays = bigArrays.withCircuitBreaking(); this.dfsResult = new DfsSearchResult(id, shardTarget); @@ -171,7 +171,7 @@ final class DefaultSearchContext extends SearchContext { this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; this.timeout = timeout; - queryShardContext = indexService.newQueryShardContext(searcher.getIndexReader()); + queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis); queryShardContext.setTypes(request.types()); } @@ -231,7 +231,12 @@ final class DefaultSearchContext extends SearchContext { } // initialize the filtering alias based on the provided filters - aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases()); + try { + final QueryBuilder queryBuilder = request.filteringAliases(); + aliasFilter = queryBuilder == null ? 
null : queryBuilder.toFilter(queryShardContext); + } catch (IOException e) { + throw new UncheckedIOException(e); + } if (query() == null) { parsedQuery(ParsedQuery.parsedMatchAllQuery()); @@ -292,7 +297,7 @@ final class DefaultSearchContext extends SearchContext { for (int i = 0; i < typesBytes.length; i++) { typesBytes[i] = new BytesRef(types[i]); } - typesFilter = new TermsQuery(TypeFieldMapper.NAME, typesBytes); + typesFilter = new TypeFieldMapper.TypesQuery(typesBytes); } if (typesFilter == null && aliasFilter == null && hasNestedFields == false) { @@ -358,11 +363,6 @@ final class DefaultSearchContext extends SearchContext { return originNanoTime; } - @Override - protected long nowInMillisImpl() { - return request.nowInMillis(); - } - @Override public ScrollContext scrollContext() { return this.scrollContext; @@ -501,11 +501,6 @@ final class DefaultSearchContext extends SearchContext { return indexService.similarityService(); } - @Override - public ScriptService scriptService() { - return scriptService; - } - @Override public BigArrays bigArrays() { return bigArrays; @@ -580,6 +575,15 @@ final class DefaultSearchContext extends SearchContext { return this; } + @Override + public boolean lowLevelCancellation() { + return lowLevelCancellation; + } + + public void lowLevelCancellation(boolean lowLevelCancellation) { + this.lowLevelCancellation = lowLevelCancellation; + } + @Override public FieldDoc searchAfter() { return searchAfter; @@ -748,15 +752,6 @@ final class DefaultSearchContext extends SearchContext { this.keepAlive = keepAlive; } - @Override - public SearchLookup lookup() { - // TODO: The types should take into account the parsing context in QueryParserContext... - if (searchLookup == null) { - searchLookup = new SearchLookup(mapperService(), fieldData(), request.types()); - } - return searchLookup; - } - @Override public DfsSearchResult dfsResult() { return dfsResult; @@ -810,4 +805,19 @@ final class DefaultSearchContext extends SearchContext { public void setProfilers(Profilers profilers) { this.profilers = profilers; } + + @Override + public void setTask(SearchTask task) { + this.task = task; + } + + @Override + public SearchTask getTask() { + return task; + } + + @Override + public boolean isCancelled() { + return task.isCancelled(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/DocValueFormat.java b/core/src/main/java/org/elasticsearch/search/DocValueFormat.java index 4cbb8720d77..47f7799ae90 100644 --- a/core/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/core/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -41,7 +41,7 @@ import java.text.ParseException; import java.util.Arrays; import java.util.Locale; import java.util.Objects; -import java.util.concurrent.Callable; +import java.util.function.LongSupplier; /** A formatter for values as returned by the fielddata/doc-values APIs. */ public interface DocValueFormat extends NamedWriteable { @@ -63,11 +63,11 @@ public interface DocValueFormat extends NamedWriteable { /** Parse a value that was formatted with {@link #format(long)} back to the * original long value. */ - long parseLong(String value, boolean roundUp, Callable now); + long parseLong(String value, boolean roundUp, LongSupplier now); /** Parse a value that was formatted with {@link #format(double)} back to * the original double value. 
*/ - double parseDouble(String value, boolean roundUp, Callable now); + double parseDouble(String value, boolean roundUp, LongSupplier now); /** Parse a value that was formatted with {@link #format(BytesRef)} back * to the original BytesRef. */ @@ -100,7 +100,7 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public long parseLong(String value, boolean roundUp, Callable now) { + public long parseLong(String value, boolean roundUp, LongSupplier now) { double d = Double.parseDouble(value); if (roundUp) { d = Math.ceil(d); @@ -111,7 +111,7 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public double parseDouble(String value, boolean roundUp, Callable now) { + public double parseDouble(String value, boolean roundUp, LongSupplier now) { return Double.parseDouble(value); } @@ -166,12 +166,12 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public long parseLong(String value, boolean roundUp, Callable now) { + public long parseLong(String value, boolean roundUp, LongSupplier now) { return parser.parse(value, now, roundUp, timeZone); } @Override - public double parseDouble(String value, boolean roundUp, Callable now) { + public double parseDouble(String value, boolean roundUp, LongSupplier now) { return parseLong(value, roundUp, now); } @@ -208,12 +208,12 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public long parseLong(String value, boolean roundUp, Callable now) { + public long parseLong(String value, boolean roundUp, LongSupplier now) { throw new UnsupportedOperationException(); } @Override - public double parseDouble(String value, boolean roundUp, Callable now) { + public double parseDouble(String value, boolean roundUp, LongSupplier now) { throw new UnsupportedOperationException(); } @@ -250,7 +250,7 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public long parseLong(String value, boolean roundUp, Callable now) { + public long parseLong(String value, boolean roundUp, LongSupplier now) { switch (value) { case "false": return 0; @@ -261,7 +261,7 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public double parseDouble(String value, boolean roundUp, Callable now) { + public double parseDouble(String value, boolean roundUp, LongSupplier now) { throw new UnsupportedOperationException(); } @@ -300,12 +300,12 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public long parseLong(String value, boolean roundUp, Callable now) { + public long parseLong(String value, boolean roundUp, LongSupplier now) { throw new UnsupportedOperationException(); } @Override - public double parseDouble(String value, boolean roundUp, Callable now) { + public double parseDouble(String value, boolean roundUp, LongSupplier now) { throw new UnsupportedOperationException(); } @@ -358,7 +358,7 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public long parseLong(String value, boolean roundUp, Callable now) { + public long parseLong(String value, boolean roundUp, LongSupplier now) { Number n; try { n = format.parse(value); @@ -379,7 +379,7 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public double parseDouble(String value, boolean roundUp, Callable now) { + public double parseDouble(String value, boolean roundUp, LongSupplier now) { Number n; try { n = format.parse(value); diff --git a/core/src/main/java/org/elasticsearch/search/SearchContextException.java 
b/core/src/main/java/org/elasticsearch/search/SearchContextException.java index 2dc25a50373..8f1ebb80139 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchContextException.java +++ b/core/src/main/java/org/elasticsearch/search/SearchContextException.java @@ -24,9 +24,6 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -/** - * - */ public class SearchContextException extends SearchException { public SearchContextException(SearchContext context, String msg) { diff --git a/core/src/main/java/org/elasticsearch/search/SearchContextMissingException.java b/core/src/main/java/org/elasticsearch/search/SearchContextMissingException.java index d2f30e72c3a..9e8d41e2ae2 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchContextMissingException.java +++ b/core/src/main/java/org/elasticsearch/search/SearchContextMissingException.java @@ -26,9 +26,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -/** - * - */ public class SearchContextMissingException extends ElasticsearchException { private final long id; diff --git a/core/src/main/java/org/elasticsearch/search/SearchException.java b/core/src/main/java/org/elasticsearch/search/SearchException.java index 535f8acd446..2633378511e 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchException.java +++ b/core/src/main/java/org/elasticsearch/search/SearchException.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - * - */ public class SearchException extends ElasticsearchException implements ElasticsearchWrapperException { private final SearchShardTarget shardTarget; diff --git a/core/src/main/java/org/elasticsearch/search/SearchParseException.java b/core/src/main/java/org/elasticsearch/search/SearchParseException.java index c0a9a370270..223225af2a6 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchParseException.java +++ b/core/src/main/java/org/elasticsearch/search/SearchParseException.java @@ -29,9 +29,6 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -/** - * - */ public class SearchParseException extends SearchContextException { public static final int UNKNOWN_POSITION = -1; diff --git a/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java b/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java index 067761b9b71..003f37616f5 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java +++ b/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java @@ -21,9 +21,6 @@ package org.elasticsearch.search; import org.elasticsearch.common.io.stream.Streamable; -/** - * - */ public interface SearchPhaseResult extends Streamable { long id(); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 4334c5cf541..9666df8cc56 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -22,8 +22,11 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.ObjectFloatHashMap; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.action.search.SearchTask; import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -64,6 +67,7 @@ import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; @@ -81,6 +85,7 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Cancellable; import org.elasticsearch.threadpool.ThreadPool.Names; @@ -104,6 +109,13 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), Property.NodeScope); public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), Property.NodeScope); + /** + * Enables low-level, frequent search cancellation checks. Enabling low-level checks makes long-running searches react + * to the cancellation request faster. However, since it produces more cancellation checks, it might slow searches + * down. + */ + public static final Setting<Boolean> LOW_LEVEL_CANCELLATION_SETTING = + Setting.boolSetting("search.low_level_cancellation", false, Property.Dynamic, Property.NodeScope); public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = @@ -130,6 +142,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile TimeValue defaultSearchTimeout; + private volatile boolean lowLevelCancellation; + private final Cancellable keepAliveReaper; private final AtomicLong idGenerator = new AtomicLong(); @@ -157,12 +171,19 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); + + lowLevelCancellation = LOW_LEVEL_CANCELLATION_SETTING.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation); } private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { this.defaultSearchTimeout = defaultSearchTimeout; } + private void setLowLevelCancellation(Boolean lowLevelCancellation) { + this.lowLevelCancellation = lowLevelCancellation; + } + @Override public void afterIndexClosed(Index index, Settings indexSettings) { // once an index is closed we can just clean up all the pending search context information @@ -209,10 +230,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv keepAliveReaper.cancel(); } - public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws IOException { + public DfsSearchResult
executeDfsPhase(ShardSearchRequest request, SearchTask task) throws IOException { final SearchContext context = createAndPutContext(request); context.incRef(); try { + context.setTask(task); contextProcessing(context); dfsPhase.execute(context); contextProcessedSuccessfully(context); @@ -231,6 +253,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv */ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final SearchContext context) throws Exception { final boolean canCache = indicesService.canCache(request, context); + context.getQueryShardContext().freezeContext(); if (canCache) { indicesService.loadIntoContext(request, context, queryPhase); } else { @@ -238,11 +261,12 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) throws IOException { + public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException { final SearchContext context = createAndPutContext(request); final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); try { + context.setTask(task); operationListener.onPreQueryPhase(context); long time = System.nanoTime(); contextProcessing(context); @@ -261,7 +285,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv // execution exception can happen while loading the cache, strip it if (e instanceof ExecutionException) { e = (e.getCause() == null || e.getCause() instanceof Exception) ? - (Exception) e.getCause() : new ElasticsearchException(e.getCause()); + (Exception) e.getCause() : new ElasticsearchException(e.getCause()); } operationListener.onFailedQueryPhase(context); logger.trace("Query phase failed", e); @@ -272,11 +296,12 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) { + public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request, SearchTask task) { final SearchContext context = findContext(request.id()); SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); try { + context.setTask(task); operationListener.onPreQueryPhase(context); long time = System.nanoTime(); contextProcessing(context); @@ -295,8 +320,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public QuerySearchResult executeQueryPhase(QuerySearchRequest request) { + public QuerySearchResult executeQueryPhase(QuerySearchRequest request, SearchTask task) { final SearchContext context = findContext(request.id()); + context.setTask(task); IndexShard indexShard = context.indexShard(); SearchOperationListener operationListener = indexShard.getSearchOperationListener(); context.incRef(); @@ -335,11 +361,12 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws IOException { + public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request, SearchTask task) throws IOException { final SearchContext context = createAndPutContext(request); context.incRef(); try { contextProcessing(context); + context.setTask(task); SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); 
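Each of these execute*Phase overloads now receives the SearchTask that the task manager registered for the incoming request and pins it on the context via context.setTask(task). The task originates in the transport layer; a hedged sketch of the hand-off (illustrative class and method names, not the literal handler code), using only the executeQueryPhase signature shown above:

-------------------------------------
import java.io.IOException;

import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.tasks.Task;

// Sketch: the task manager creates a Task per transport request; search requests
// materialize as SearchTask, so a shard-level handler can cast and pass it along.
final class ShardQueryHandlerSketch {
    private final SearchService searchService;

    ShardQueryHandlerSketch(SearchService searchService) {
        this.searchService = searchService;
    }

    QuerySearchResultProvider handle(ShardSearchRequest request, Task task) throws IOException {
        // executeQueryPhase(...) attaches the task to the search context, and
        // cleanContext(...) detaches it again once the phase completes.
        return searchService.executeQueryPhase(request, (SearchTask) task);
    }
}
-------------------------------------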
operationListener.onPreQueryPhase(context); long time = System.nanoTime(); @@ -375,10 +402,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) { + public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request, SearchTask task) { final SearchContext context = findContext(request.id()); context.incRef(); try { + context.setTask(task); contextProcessing(context); context.searcher().setAggregatedDfs(request.dfs()); SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); @@ -416,10 +444,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) { + public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request, SearchTask task) { final SearchContext context = findContext(request.id()); context.incRef(); try { + context.setTask(task); contextProcessing(context); SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); processScroll(request, context); @@ -448,7 +477,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } operationListener.onFetchPhase(context, System.nanoTime() - time2); return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), - context.shardTarget()); + context.shardTarget()); } catch (Exception e) { logger.trace("Fetch phase failed", e); processFailure(context, e); @@ -458,11 +487,12 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } } - public FetchSearchResult executeFetchPhase(ShardFetchRequest request) { + public FetchSearchResult executeFetchPhase(ShardFetchRequest request, SearchTask task) { final SearchContext context = findContext(request.id()); final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); try { + context.setTask(task); contextProcessing(context); if (request.lastEmittedDoc() != null) { context.scrollContext().lastEmittedDoc = request.lastEmittedDoc(); @@ -493,7 +523,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv if (context == null) { throw new SearchContextMissingException(id); } - SearchContext.setCurrent(context); return context; } @@ -516,17 +545,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException { - - DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher); - SearchContext.setCurrent(context); + final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher); try { - request.rewrite(context.getQueryShardContext()); - // reset that we have used nowInMillis from the context since it may - // have been rewritten so its no longer in the query and the request can - // be cached. If it is still present in the request (e.g. in a range - // aggregation) it will still be caught when the aggregation is - // evaluated. 
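The comment being removed here documents the old approach: rewrite against the live context, then reset its "nowInMillis was used" flag. The replacement (visible in the createSearchContext hunk below) rewrites against a disposable copy of the QueryShardContext and asserts that the live context is still cachable. A minimal sketch of why confining now() to a copy achieves the same goal, with stand-in names rather than the real QueryShardContext API:

-------------------------------------
import java.util.function.LongSupplier;

// Stand-in for the real shard context: reading now() flips a flag that
// disqualifies the request from the request cache.
final class ShardContextSketch {
    private final LongSupplier nowSupplier;
    private boolean cachable = true;

    ShardContextSketch(LongSupplier nowSupplier) {
        this.nowSupplier = nowSupplier;
    }

    // Copy constructor: shares the clock but gets a fresh cachable flag, so
    // whatever the rewrite consumes never taints the original context.
    ShardContextSketch(ShardContextSketch other) {
        this(other.nowSupplier);
    }

    long nowInMillis() {
        cachable = false; // a time-dependent result must not be cached
        return nowSupplier.getAsLong();
    }

    boolean isCachable() {
        return cachable;
    }
}
-------------------------------------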
- context.resetNowInMillisUsed(); if (request.scroll() != null) { context.scrollContext(new ScrollContext()); context.scrollContext().scroll = request.scroll(); @@ -552,6 +572,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv keepAlive = request.scroll().keepAlive().millis(); } context.keepAlive(keepAlive); + context.lowLevelCancellation(lowLevelCancellation); } catch (Exception e) { context.close(); throw ExceptionsHelper.convertToRuntime(e); @@ -560,16 +581,30 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv return context; } - public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) { + public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) + throws IOException { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().getId()); SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId()); Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; - return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, - indexService, - indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, - timeout, fetchPhase); + final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, + engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, + timeout, fetchPhase); + boolean success = false; + try { + // we clone the query shard context here just for rewriting, otherwise we + // might end up with incorrect state since we use now() or script services + // during rewrite and normalize / evaluate templates etc.
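createContext above also seeds every new context with the node-wide flag via context.lowLevelCancellation(lowLevelCancellation), backed by the dynamic search.low_level_cancellation setting declared earlier in this file. What a consumer of that flag can look like during collection, sketched against Lucene's FilterLeafCollector (an illustration of the trade-off, not the literal collector introduced by this change):

-------------------------------------
import java.io.IOException;
import java.util.function.BooleanSupplier;

import org.apache.lucene.search.FilterLeafCollector;
import org.apache.lucene.search.LeafCollector;

// Sketch: with low-level cancellation enabled, the task state is polled while
// collecting individual documents, so a cancelled search aborts quickly; the
// price is one extra check per collected document.
final class CancellableLeafCollectorSketch extends FilterLeafCollector {
    private final BooleanSupplier isCancelled; // e.g. task::isCancelled
    private final boolean lowLevelCancellation;

    CancellableLeafCollectorSketch(LeafCollector in, BooleanSupplier isCancelled, boolean lowLevelCancellation) {
        super(in);
        this.isCancelled = isCancelled;
        this.lowLevelCancellation = lowLevelCancellation;
    }

    @Override
    public void collect(int doc) throws IOException {
        if (lowLevelCancellation && isCancelled.getAsBoolean()) {
            // the real implementation throws a dedicated task-cancelled exception
            throw new RuntimeException("search task was cancelled");
        }
        in.collect(doc);
    }
}
-------------------------------------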
+ request.rewrite(new QueryShardContext(searchContext.getQueryShardContext())); + assert searchContext.getQueryShardContext().isCachable(); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(searchContext); + } + } + return searchContext; } private void freeAllContextForIndex(Index index) { @@ -618,9 +653,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private void cleanContext(SearchContext context) { try { - assert context == SearchContext.current(); context.clearReleasables(Lifetime.PHASE); - SearchContext.removeCurrent(); + context.setTask(null); } finally { context.decRef(); } @@ -735,8 +769,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } if (source.scriptFields() != null) { for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { - SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, - Collections.emptyMap()); + SearchScript searchScript = scriptService.search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH); context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure())); } } @@ -859,10 +892,14 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } if ((time - lastAccessTime > context.keepAlive())) { logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time, - lastAccessTime, context.keepAlive()); + lastAccessTime, context.keepAlive()); freeContext(context.id()); } } } } + + public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { + return indicesService.buildAliasFilter(state, index, expressions); + } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 616b0091fe8..a18395d5e66 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -40,9 +40,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -/** - * - */ public class AggregatorFactories { public static final AggregatorFactories EMPTY = new AggregatorFactories(null, new AggregatorFactory[0], diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/HasAggregations.java b/core/src/main/java/org/elasticsearch/search/aggregations/HasAggregations.java index 46020b050bc..53302f04905 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/HasAggregations.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/HasAggregations.java @@ -19,9 +19,6 @@ package org.elasticsearch.search.aggregations; -/** - * - */ public interface HasAggregations { Aggregations getAggregations(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index b7635d3dc32..df25f0e2635 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations; -import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -69,12 +68,10 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na private final BigArrays bigArrays; private final ScriptService scriptService; - private final ClusterState clusterState; - public ReduceContext(BigArrays bigArrays, ScriptService scriptService, ClusterState clusterState) { + public ReduceContext(BigArrays bigArrays, ScriptService scriptService) { this.bigArrays = bigArrays; this.scriptService = scriptService; - this.clusterState = clusterState; } public BigArrays bigArrays() { @@ -84,10 +81,6 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na public ScriptService scriptService() { return scriptService; } - - public ClusterState clusterState() { - return clusterState; - } } protected final String name; @@ -126,7 +119,6 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na protected abstract void doWriteTo(StreamOutput out) throws IOException; - @Override public String getName() { return name; @@ -215,5 +207,4 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na public static final String TO = "to"; public static final String TO_AS_STRING = "to_as_string"; } - } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index ab655497c4c..ca355320408 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -35,9 +35,6 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -/** - * - */ public abstract class BucketsAggregator extends AggregatorBase { private final BigArrays bigArrays; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java index 48be5365bb1..78d19280ce2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java @@ -72,7 +72,9 @@ public class FilterAggregationBuilder extends AbstractAggregationBuilder doBuild(AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - return new FilterAggregatorFactory(name, type, filter, context, parent, subFactoriesBuilder, metaData); + // TODO this sucks we need a rewrite phase for aggregations too + final QueryBuilder rewrittenFilter = QueryBuilder.rewriteQuery(filter, context.searchContext().getQueryShardContext()); + return new FilterAggregatorFactory(name, type, rewrittenFilter, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index 08bbdaf3e3b..2c91b62a84d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -47,9 
+47,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -/** - * - */ public class FiltersAggregator extends BucketsAggregator { public static final ParseField FILTERS_FIELD = new ParseField("filters"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index 6e4980ede27..bc5057eee14 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -31,9 +31,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class GlobalAggregator extends SingleBucketAggregator { public GlobalAggregator(String name, AggregatorFactories subFactories, AggregationContext aggregationContext, List pipelineAggregators, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index d3b8857ccab..b20cf1b346a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -21,6 +21,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -35,8 +37,12 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; +import static java.util.Collections.unmodifiableMap; + /** * A builder for histograms on date fields. 
*/ @@ -44,6 +50,29 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = InternalDateHistogram.TYPE.name(); + public static final Map<String, DateTimeUnit> DATE_FIELD_UNITS; + + static { + Map<String, DateTimeUnit> dateFieldUnits = new HashMap<>(); + dateFieldUnits.put("year", DateTimeUnit.YEAR_OF_CENTURY); + dateFieldUnits.put("1y", DateTimeUnit.YEAR_OF_CENTURY); + dateFieldUnits.put("quarter", DateTimeUnit.QUARTER); + dateFieldUnits.put("1q", DateTimeUnit.QUARTER); + dateFieldUnits.put("month", DateTimeUnit.MONTH_OF_YEAR); + dateFieldUnits.put("1M", DateTimeUnit.MONTH_OF_YEAR); + dateFieldUnits.put("week", DateTimeUnit.WEEK_OF_WEEKYEAR); + dateFieldUnits.put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR); + dateFieldUnits.put("day", DateTimeUnit.DAY_OF_MONTH); + dateFieldUnits.put("1d", DateTimeUnit.DAY_OF_MONTH); + dateFieldUnits.put("hour", DateTimeUnit.HOUR_OF_DAY); + dateFieldUnits.put("1h", DateTimeUnit.HOUR_OF_DAY); + dateFieldUnits.put("minute", DateTimeUnit.MINUTES_OF_HOUR); + dateFieldUnits.put("1m", DateTimeUnit.MINUTES_OF_HOUR); + dateFieldUnits.put("second", DateTimeUnit.SECOND_OF_MINUTE); + dateFieldUnits.put("1s", DateTimeUnit.SECOND_OF_MINUTE); + DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits); + } + private long interval; private DateHistogramInterval dateHistogramInterval; private long offset = 0; @@ -245,8 +274,36 @@ public class DateHistogramAggregationBuilder @Override protected ValuesSourceAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + Rounding rounding = createRounding(); + ExtendedBounds roundedBounds = null; + if (this.extendedBounds != null) { + // parse any string bounds to longs and round + roundedBounds = this.extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding); + } return new DateHistogramAggregatorFactory(name, type, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount, - extendedBounds, context, parent, subFactoriesBuilder, metaData); + rounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); + } + + private Rounding createRounding() { + Rounding.Builder tzRoundingBuilder; + if (dateHistogramInterval != null) { + DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); + if (dateTimeUnit != null) { + tzRoundingBuilder = Rounding.builder(dateTimeUnit); + } else { + // the interval is a time value? + tzRoundingBuilder = Rounding.builder( + TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval")); + } + } else { + // the interval is an integer time value in millis?
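Moving this logic into the builder means interval resolution happens once, before any caching decision. Condensed from the hunk above, interval strings resolve along two paths: calendar units via DATE_FIELD_UNITS, everything else as a fixed-length TimeValue. A small worked example (interval values illustrative):

-------------------------------------
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.joda.time.DateTimeZone;

final class IntervalResolutionExample {
    static Rounding resolve(String interval, DateTimeZone timeZone) {
        // "1M" maps to MONTH_OF_YEAR (calendar-aware: variable month lengths, DST);
        // "90m" is not in the map and falls back to fixed 90-minute buckets.
        DateTimeUnit unit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval);
        Rounding.Builder builder = unit != null
                ? Rounding.builder(unit)
                : Rounding.builder(TimeValue.parseTimeValue(interval, null, "interval"));
        if (timeZone != null) {
            builder.timeZone(timeZone); // bucket boundaries computed in local time
        }
        return builder.build();
    }
}
-------------------------------------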
+ tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval)); + } + if (timeZone() != null) { + tzRoundingBuilder.timeZone(timeZone()); + } + Rounding rounding = tzRoundingBuilder.build(); + return rounding; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 79f81e28374..f1c4a6b4fae 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -19,9 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; -import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -30,12 +28,9 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Map; -import static java.util.Collections.unmodifiableMap; - import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -44,29 +39,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; public final class DateHistogramAggregatorFactory extends ValuesSourceAggregatorFactory { - public static final Map DATE_FIELD_UNITS; - - static { - Map dateFieldUnits = new HashMap<>(); - dateFieldUnits.put("year", DateTimeUnit.YEAR_OF_CENTURY); - dateFieldUnits.put("1y", DateTimeUnit.YEAR_OF_CENTURY); - dateFieldUnits.put("quarter", DateTimeUnit.QUARTER); - dateFieldUnits.put("1q", DateTimeUnit.QUARTER); - dateFieldUnits.put("month", DateTimeUnit.MONTH_OF_YEAR); - dateFieldUnits.put("1M", DateTimeUnit.MONTH_OF_YEAR); - dateFieldUnits.put("week", DateTimeUnit.WEEK_OF_WEEKYEAR); - dateFieldUnits.put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR); - dateFieldUnits.put("day", DateTimeUnit.DAY_OF_MONTH); - dateFieldUnits.put("1d", DateTimeUnit.DAY_OF_MONTH); - dateFieldUnits.put("hour", DateTimeUnit.HOUR_OF_DAY); - dateFieldUnits.put("1h", DateTimeUnit.HOUR_OF_DAY); - dateFieldUnits.put("minute", DateTimeUnit.MINUTES_OF_HOUR); - dateFieldUnits.put("1m", DateTimeUnit.MINUTES_OF_HOUR); - dateFieldUnits.put("second", DateTimeUnit.SECOND_OF_MINUTE); - dateFieldUnits.put("1s", DateTimeUnit.SECOND_OF_MINUTE); - DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits); - } - private final DateHistogramInterval dateHistogramInterval; private final long interval; private final long offset; @@ -74,10 +46,11 @@ public final class DateHistogramAggregatorFactory private final boolean keyed; private final long minDocCount; private final ExtendedBounds extendedBounds; + private Rounding rounding; public DateHistogramAggregatorFactory(String name, Type type, ValuesSourceConfig config, long interval, DateHistogramInterval dateHistogramInterval, long offset, InternalOrder order, boolean keyed, long minDocCount, - ExtendedBounds extendedBounds, 
AggregationContext context, AggregatorFactory parent, + Rounding rounding, ExtendedBounds extendedBounds, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, context, parent, subFactoriesBuilder, metaData); this.interval = interval; @@ -87,34 +60,13 @@ public final class DateHistogramAggregatorFactory this.keyed = keyed; this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; + this.rounding = rounding; } public long minDocCount() { return minDocCount; } - private Rounding createRounding() { - Rounding.Builder tzRoundingBuilder; - if (dateHistogramInterval != null) { - DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); - if (dateTimeUnit != null) { - tzRoundingBuilder = Rounding.builder(dateTimeUnit); - } else { - // the interval is a time value? - tzRoundingBuilder = Rounding.builder( - TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval")); - } - } else { - // the interval is an integer time value in millis? - tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval)); - } - if (timeZone() != null) { - tzRoundingBuilder.timeZone(timeZone()); - } - Rounding rounding = tzRoundingBuilder.build(); - return rounding; - } - @Override protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) throws IOException { @@ -126,18 +78,7 @@ public final class DateHistogramAggregatorFactory private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - Rounding rounding = createRounding(); - // we need to round the bounds given by the user and we have to do it - // for every aggregator we create - // as the rounding is not necessarily an idempotent operation. - // todo we need to think of a better structure to the factory/agtor - // code so we won't need to do that - ExtendedBounds roundedBounds = null; - if (extendedBounds != null) { - // parse any string bounds to longs and round them - roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding); - } - return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, roundedBounds, valuesSource, + return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, extendedBounds, valuesSource, config.format(), context, parent, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java index 46fae19e49f..85160347612 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java @@ -153,11 +153,11 @@ public class ExtendedBounds implements ToXContent, Writeable { Long max = this.max; assert format != null; if (minAsStr != null) { - min = format.parseLong(minAsStr, false, context::nowInMillis); + min = format.parseLong(minAsStr, false, context.getQueryShardContext()::nowInMillis); } if (maxAsStr != null) { // TODO: Should we rather pass roundUp=true? 
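Note how the "now" supplier is passed here: context.getQueryShardContext()::nowInMillis is only invoked if the bound string actually contains a now-based date-math expression, which is what lets the shard context record whether the request stayed cachable. The TODO above concerns the roundUp flag: with roundUp=true, an upper bound like now/M would resolve to the end of the month instead of its start. A hedged call-site sketch (the helper name is made up):

-------------------------------------
import java.util.function.LongSupplier;

import org.elasticsearch.search.DocValueFormat;

final class BoundParsingExample {
    // roundUp=false resolves "now/M" to the first millisecond of the month,
    // roundUp=true to the last one; min bounds want the former, max bounds
    // arguably the latter (hence the TODO in the hunk above).
    static long parseBound(DocValueFormat format, String bound, boolean roundUp, LongSupplier nowInMillis) {
        return format.parseLong(bound, roundUp, nowInMillis);
    }
}
-------------------------------------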
- max = format.parseLong(maxAsStr, false, context::nowInMillis); + max = format.parseLong(maxAsStr, false, context.getQueryShardContext()::nowInMillis); } if (min != null && max != null && min.compareTo(max) > 0) { throw new SearchParseException(context, "[extended_bounds.min][" + min + "] cannot be greater than " + diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java index e80be56f341..013c25cfc88 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java @@ -34,9 +34,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class MissingAggregator extends SingleBucketAggregator { private final ValuesSource valuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index d45f103ed5e..7e88e38b7f2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -40,9 +40,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class ReverseNestedAggregator extends SingleBucketAggregator { static final ParseField PATH_FIELD = new ParseField("path"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index f4103d87fbd..f67bec631bc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -40,10 +40,10 @@ public class AbstractRangeAggregatorFactory { private final InternalRange.Factory rangeFactory; - private final List ranges; + private final R[] ranges; private final boolean keyed; - public AbstractRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, List ranges, boolean keyed, + public AbstractRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, R[] ranges, boolean keyed, InternalRange.Factory rangeFactory, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, context, parent, subFactoriesBuilder, metaData); @@ -55,7 +55,7 @@ public class AbstractRangeAggregatorFactory pipelineAggregators, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); + return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java index 13d10bd0a0c..0692b0ed304 100644 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java @@ -19,13 +19,17 @@ package org.elasticsearch.search.aggregations.bucket.range; +import org.apache.lucene.util.InPlaceMergeSorter; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; +import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import java.io.IOException; import java.util.ArrayList; @@ -55,6 +59,40 @@ public abstract class AbstractRangeBuilder config) { + Range[] ranges = new Range[this.ranges.size()]; + for (int i = 0; i < ranges.length; i++) { + ranges[i] = this.ranges.get(i).process(config.format(), context.searchContext()); + } + sortRanges(ranges); + return ranges; + } + + private static void sortRanges(final Range[] ranges) { + new InPlaceMergeSorter() { + + @Override + protected void swap(int i, int j) { + final Range tmp = ranges[i]; + ranges[i] = ranges[j]; + ranges[j] = tmp; + } + + @Override + protected int compare(int i, int j) { + int cmp = Double.compare(ranges[i].from, ranges[j].from); + if (cmp == 0) { + cmp = Double.compare(ranges[i].to, ranges[j].to); + } + return cmp; + } + }.sort(0, ranges.length); + } + @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(ranges.size()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 54ee27bfa96..c35956e2dcf 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -35,9 +35,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -/** - * - */ public class InternalRange> extends InternalMultiBucketAggregation implements Range { static final Factory FACTORY = new Factory(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index c815ae9d3cf..73a4d86819a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -114,6 +114,8 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + // We need to call processRanges here so they are parsed before we make the decision of whether to cache the request + Range[] ranges = processRanges(context, config); return new RangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); } diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index c83e2d2c721..3ca79c82e72 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.util.InPlaceMergeSorter; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; @@ -49,9 +48,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -/** - * - */ public class RangeAggregator extends BucketsAggregator { public static final ParseField RANGES_FIELD = new ParseField("ranges"); @@ -119,10 +115,10 @@ public class RangeAggregator extends BucketsAggregator { Double from = this.from; Double to = this.to; if (fromAsStr != null) { - from = parser.parseDouble(fromAsStr, false, context::nowInMillis); + from = parser.parseDouble(fromAsStr, false, context.getQueryShardContext()::nowInMillis); } if (toAsStr != null) { - to = parser.parseDouble(toAsStr, false, context::nowInMillis); + to = parser.parseDouble(toAsStr, false, context.getQueryShardContext()::nowInMillis); } return new Range(key, from, fromAsStr, to, toAsStr); } @@ -210,7 +206,7 @@ public class RangeAggregator extends BucketsAggregator { final double[] maxTo; public RangeAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, - InternalRange.Factory rangeFactory, List ranges, boolean keyed, AggregationContext aggregationContext, + InternalRange.Factory rangeFactory, Range[] ranges, boolean keyed, AggregationContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); @@ -220,11 +216,7 @@ public class RangeAggregator extends BucketsAggregator { this.keyed = keyed; this.rangeFactory = rangeFactory; - this.ranges = new Range[ranges.size()]; - for (int i = 0; i < this.ranges.length; i++) { - this.ranges[i] = ranges.get(i).process(format, context.searchContext()); - } - sortRanges(this.ranges); + this.ranges = ranges; maxTo = new double[this.ranges.length]; maxTo[0] = this.ranges[0].to; @@ -337,45 +329,21 @@ public class RangeAggregator extends BucketsAggregator { return rangeFactory.create(name, buckets, format, keyed, pipelineAggregators(), metaData()); } - private static void sortRanges(final Range[] ranges) { - new InPlaceMergeSorter() { - - @Override - protected void swap(int i, int j) { - final Range tmp = ranges[i]; - ranges[i] = ranges[j]; - ranges[j] = tmp; - } - - @Override - protected int compare(int i, int j) { - int cmp = Double.compare(ranges[i].from, ranges[j].from); - if (cmp == 0) { - cmp = Double.compare(ranges[i].to, ranges[j].to); - } - return cmp; - } - }.sort(0, ranges.length); - } - public static class Unmapped extends NonCollectingAggregator { - private final List ranges; + private final R[] ranges; private final boolean keyed; private final InternalRange.Factory factory; private final DocValueFormat format; @SuppressWarnings("unchecked") - public Unmapped(String name, List ranges, boolean keyed, DocValueFormat format, + public Unmapped(String name, 
R[] ranges, boolean keyed, DocValueFormat format, AggregationContext context, Aggregator parent, InternalRange.Factory factory, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); - this.ranges = new ArrayList<>(); - for (R range : ranges) { - this.ranges.add((R) range.process(format, context.searchContext())); - } + this.ranges = ranges; this.keyed = keyed; this.format = format; this.factory = factory; @@ -384,7 +352,7 @@ public class RangeAggregator extends BucketsAggregator { @Override public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); - List buckets = new ArrayList<>(ranges.size()); + List buckets = new ArrayList<>(ranges.length); for (RangeAggregator.Range range : ranges) { buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, keyed, format)); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 5dec4c40c45..b3297401457 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -29,12 +29,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.List; import java.util.Map; public class RangeAggregatorFactory extends AbstractRangeAggregatorFactory { - public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, List ranges, boolean keyed, + public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, Range[] ranges, boolean keyed, Factory rangeFactory, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java index c8cb2c76715..51c732e9523 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java @@ -33,9 +33,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -/** - * - */ public class RangeParser extends NumericValuesSourceParser { public RangeParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java index a75b071569c..c8a8e16640b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java @@ -259,6 +259,9 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + // We need to call processRanges here so they are parsed and we know whether `now` has been used 
before we make + // the decision of whether to cache the request + Range[] ranges = processRanges(context, config); return new DateRangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java index d3bb7ac6238..d5d16123ec3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java @@ -30,12 +30,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.List; import java.util.Map; public class DateRangeAggregatorFactory extends AbstractRangeAggregatorFactory { - public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, List ranges, boolean keyed, + public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, Range[] ranges, boolean keyed, Factory rangeFactory, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java index f8eff715abb..9277a2d6a3d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.util.List; import java.util.Map; -/** - * - */ public class DateRangeParser extends RangeParser { public DateRangeParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java index f0dfec2312f..f7b55ab9916 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java @@ -32,9 +32,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class InternalDateRange extends InternalRange { public static final Factory FACTORY = new Factory(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java index 4a4cab2affa..583bc83feb4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java @@ -215,6 +215,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde protected 
ValuesSourceAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + Range[] ranges = this.ranges.toArray(new Range[this.range().size()]); return new GeoDistanceRangeAggregatorFactory(name, type, config, origin, ranges, unit, distanceType, keyed, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java index 677731d64ef..d43c2218018 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java @@ -38,9 +38,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -/** - * - */ public class GeoDistanceParser extends GeoPointValuesSourceParser { static final ParseField ORIGIN_FIELD = new ParseField("origin", "center", "point", "por"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java index 32c3592a8fc..62aa18b168a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java @@ -51,13 +51,13 @@ public class GeoDistanceRangeAggregatorFactory private final InternalRange.Factory rangeFactory = InternalGeoDistance.FACTORY; private final GeoPoint origin; - private final List ranges; + private final Range[] ranges; private final DistanceUnit unit; private final GeoDistance distanceType; private final boolean keyed; public GeoDistanceRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, GeoPoint origin, - List ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, AggregationContext context, + Range[] ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, context, parent, subFactoriesBuilder, metaData); this.origin = origin; @@ -70,7 +70,7 @@ public class GeoDistanceRangeAggregatorFactory @Override protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); + return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java index f01e0233afd..86fc0372982 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java @@ -31,9 +31,6 @@ import 
java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class InternalGeoDistance extends InternalRange { public static final Factory FACTORY = new Factory(); @@ -119,4 +116,9 @@ public class InternalGeoDistance extends InternalRange getFactory() { return FACTORY; } -} \ No newline at end of file + + @Override + public String getWriteableName() { + return GeoDistanceAggregationBuilder.NAME; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java index 5d95f0dd494..1f4afebdbe0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeParser.java @@ -82,9 +82,9 @@ public class IpRangeParser extends BytesValuesSourceParser { if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.KEY_FIELD)) { key = parser.text(); } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.FROM_FIELD)) { - from = parser.text(); + from = parser.textOrNull(); } else if (parseFieldMatcher.match(parser.currentName(), RangeAggregator.Range.TO_FIELD)) { - to = parser.text(); + to = parser.textOrNull(); } else if (parseFieldMatcher.match(parser.currentName(), MASK_FIELD)) { mask = parser.text(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java index a62035d7234..49f941b36b7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerParser.java @@ -29,9 +29,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class DiversifiedSamplerParser extends AnyValuesSourceParser { public DiversifiedSamplerParser() { super(true, false); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index cd18386da1e..cdd1f8d19a7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -197,13 +197,13 @@ public abstract class InternalSignificantTerms ordered = new BucketSignificancePriorityQueue<>(size); for (Map.Entry> entry : buckets.entrySet()) { List sameTermBuckets = entry.getValue(); final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext); - b.updateScore(getSignificanceHeuristic()); + b.updateScore(heuristic); if ((b.score > 0) && (b.subsetDf >= minDocCount)) { ordered.insertWithOverflow(b); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java index 4de8f795401..7d15c8ea1ef 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java @@ -41,9 +41,6 @@ import java.util.Map; import static java.util.Collections.emptyList; -/** - * - */ public class SignificantLongTermsAggregator extends LongTermsAggregator { public SignificantLongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 245297d72cc..5af538965d1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -44,9 +44,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Objects; -/** - * - */ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "significant_terms"; public static final InternalAggregation.Type TYPE = new Type(NAME); @@ -220,8 +217,9 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB @Override protected ValuesSourceAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(context.searchContext()); return new SignificantTermsAggregatorFactory(name, type, config, includeExclude, executionHint, filterBuilder, - bucketCountThresholds, significanceHeuristic, context, parent, subFactoriesBuilder, metaData); + bucketCountThresholds, executionHeuristic, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index ab30e1b2d4a..6e820b6da26 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -91,15 +91,14 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac : searcher.count(filter); this.bucketCountThresholds = bucketCountThresholds; this.significanceHeuristic = significanceHeuristic; - this.significanceHeuristic.initialize(context.searchContext()); - setFieldInfo(); + setFieldInfo(context.searchContext()); } - private void setFieldInfo() { + private void setFieldInfo(SearchContext context) { if (!config.unmapped()) { this.indexedFieldName = config.fieldContext().field(); - fieldType = SearchContext.current().smartNameFieldType(indexedFieldName); + fieldType = context.smartNameFieldType(indexedFieldName); } } @@ -211,13 +210,13 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } } assert execution != null; - + DocValueFormat format = config.format(); if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) { throw new AggregationExecutionException("Aggregation [" + name + "] 
cannot support regular expression style include/exclude " + "settings as they can only be applied to string fields. Use an array of values for include/exclude clauses"); } - + return execution.create(name, factories, valuesSource, format, bucketCountThresholds, includeExclude, context, parent, significanceHeuristic, this, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java index 0f08cf0a0a3..5f40fe73546 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParser.java @@ -40,9 +40,6 @@ import java.io.IOException; import java.util.Map; import java.util.Optional; -/** - * - */ public class SignificantTermsParser extends AbstractTermsParser { private final ParseFieldRegistry significanceHeuristicParserRegistry; private final IndicesQueriesRegistry queriesRegistry; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index c933f9ef596..9519a95d3a5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -24,38 +24,57 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.support.XContentParseContext; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Collections; import java.util.Objects; public class ScriptHeuristic extends SignificanceHeuristic { public static final String NAME = "script_heuristic"; - private final LongAccessor subsetSizeHolder; - private final LongAccessor supersetSizeHolder; - private final LongAccessor subsetDfHolder; - private final LongAccessor supersetDfHolder; private final Script script; - ExecutableScript searchScript = null; + + // This class holds an executable form of the script with private variables ready for execution + // on a single search thread. 
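The comment above captures the heart of this refactor: the parsed ScriptHeuristic stays immutable and shareable across threads, and a rewrite step hands back a private executable copy whose script variables are bound exactly once. A minimal sketch of that pattern, using illustrative stand-in names rather than the Elasticsearch API; the real ExecutableScriptHeuristic follows below:

// Sketch only: the shared heuristic is immutable; rewrite() returns a
// single-thread executable copy with its variables bound once.
import java.util.HashMap;
import java.util.Map;
import java.util.function.ToDoubleFunction;

abstract class HeuristicSketch {
    HeuristicSketch rewrite() {
        return this; // default: already executable, nothing to bind
    }
    abstract double score(long subsetFreq, long subsetSize);
}

final class ScriptedHeuristicSketch extends HeuristicSketch {
    private final ToDoubleFunction<Map<String, Object>> script;

    ScriptedHeuristicSketch(ToDoubleFunction<Map<String, Object>> script) {
        this.script = script;
    }

    @Override
    HeuristicSketch rewrite() {
        return new Executable(script); // bind per-thread state here, not in the shared copy
    }

    @Override
    double score(long subsetFreq, long subsetSize) {
        // mirrors the diff: failing loudly beats the old "warn and return 0" fallback
        throw new UnsupportedOperationException("call rewrite() to get an executable copy");
    }

    private static final class Executable extends HeuristicSketch {
        private final ToDoubleFunction<Map<String, Object>> script;
        private final Map<String, Object> vars = new HashMap<>(); // safe: used by one thread

        Executable(ToDoubleFunction<Map<String, Object>> script) {
            this.script = script;
        }

        @Override
        double score(long subsetFreq, long subsetSize) {
            vars.put("_subset_freq", subsetFreq);
            vars.put("_subset_size", subsetSize);
            return script.applyAsDouble(vars);
        }
    }
}

Both rewrite overloads in the diff (ReduceContext on the coordinating node, SearchContext on the shard) follow this shape; only the source of the ExecutableScript differs.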
+ static class ExecutableScriptHeuristic extends ScriptHeuristic { + private final LongAccessor subsetSizeHolder; + private final LongAccessor supersetSizeHolder; + private final LongAccessor subsetDfHolder; + private final LongAccessor supersetDfHolder; + private final ExecutableScript executableScript; + + ExecutableScriptHeuristic(Script script, ExecutableScript executableScript){ + super(script); + subsetSizeHolder = new LongAccessor(); + supersetSizeHolder = new LongAccessor(); + subsetDfHolder = new LongAccessor(); + supersetDfHolder = new LongAccessor(); + this.executableScript = executableScript; + executableScript.setNextVar("_subset_freq", subsetDfHolder); + executableScript.setNextVar("_subset_size", subsetSizeHolder); + executableScript.setNextVar("_superset_freq", supersetDfHolder); + executableScript.setNextVar("_superset_size", supersetSizeHolder); + } + + @Override + public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { + subsetSizeHolder.value = subsetSize; + supersetSizeHolder.value = supersetSize; + subsetDfHolder.value = subsetFreq; + supersetDfHolder.value = supersetFreq; + return ((Number) executableScript.run()).doubleValue(); + } + } public ScriptHeuristic(Script script) { - subsetSizeHolder = new LongAccessor(); - supersetSizeHolder = new LongAccessor(); - subsetDfHolder = new LongAccessor(); - supersetDfHolder = new LongAccessor(); this.script = script; } @@ -72,22 +91,15 @@ public class ScriptHeuristic extends SignificanceHeuristic { } @Override - public void initialize(InternalAggregation.ReduceContext context) { - initialize(context.scriptService()); + public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext context) { + return new ExecutableScriptHeuristic(script, context.scriptService().executable(script, ScriptContext.Standard.AGGS)); } @Override - public void initialize(SearchContext context) { - initialize(context.scriptService()); + public SignificanceHeuristic rewrite(SearchContext context) { + return new ExecutableScriptHeuristic(script, context.getQueryShardContext().getExecutableScript(script, ScriptContext.Standard.AGGS)); } - public void initialize(ScriptService scriptService) { - searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, Collections.emptyMap()); - searchScript.setNextVar("_subset_freq", subsetDfHolder); - searchScript.setNextVar("_subset_size", subsetSizeHolder); - searchScript.setNextVar("_superset_freq", supersetDfHolder); - searchScript.setNextVar("_superset_size", supersetSizeHolder); - } /** * Calculates score with a script @@ -100,19 +112,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { */ @Override public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { - if (searchScript == null) { - //In tests, wehn calling assertSearchResponse(..) the response is streamed one additional time with an arbitrary version, see assertVersionSerializable(..). - // Now, for version before 1.5.0 the score is computed after streaming the response but for scripts the script does not exists yet. - // assertSearchResponse() might therefore fail although there is no problem. - // This should be replaced by an exception in 2.0. 
- ESLoggerFactory.getLogger("script heuristic").warn("cannot compute score - script has not been initialized yet."); - return 0; - } - subsetSizeHolder.value = subsetSize; - supersetSizeHolder.value = supersetSize; - subsetDfHolder.value = subsetFreq; - supersetDfHolder.value = supersetFreq; - return ((Number) searchScript.run()).doubleValue(); + throw new UnsupportedOperationException("This scoring heuristic must have 'rewrite' called on it to provide a version ready for use"); } @Override @@ -123,7 +123,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { @Override public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException { builder.startObject(NAME); - builder.field(ScriptField.SCRIPT.getPreferredName()); + builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName()); script.toXContent(builder, builderParams); builder.endObject(); return builder; @@ -157,7 +157,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { if (token.equals(XContentParser.Token.FIELD_NAME)) { currentFieldName = parser.currentName(); } else { - if (context.matchField(currentFieldName, ScriptField.SCRIPT)) { + if (context.matchField(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. unknown object [{}]", heuristicName, currentFieldName); @@ -171,26 +171,6 @@ public class ScriptHeuristic extends SignificanceHeuristic { return new ScriptHeuristic(script); } - public static class ScriptHeuristicBuilder implements SignificanceHeuristicBuilder { - - private Script script = null; - - public ScriptHeuristicBuilder setScript(Script script) { - this.script = script; - return this; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException { - builder.startObject(NAME); - builder.field(ScriptField.SCRIPT.getPreferredName()); - script.toXContent(builder, builderParams); - builder.endObject(); - return builder; - } - - } - public final class LongAccessor extends Number { public long value; @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java index db9711c1a8d..7b6cf699741 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java @@ -50,11 +50,23 @@ public abstract class SignificanceHeuristic implements NamedWriteable, ToXConten } } - public void initialize(InternalAggregation.ReduceContext reduceContext) { - + /** + * Provides a hook for subclasses to provide a version of the heuristic + * prepared for execution on data on the coordinating node. + * @param reduceContext the reduce context on the coordinating node + * @return a version of this heuristic suitable for execution + */ + public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext reduceContext) { + return this; } - public void initialize(SearchContext context) { - + /** + * Provides a hook for subclasses to provide a version of the heuristic + * prepared for execution on data on a shard. 
+ * @param context the search context on the data node + * @return a version of this heuristic suitable for execution + */ + public SignificanceHeuristic rewrite(SearchContext context) { + return this; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java index b2b57f9d060..fe3d0dbbf38 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java @@ -36,9 +36,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -/** - * - */ public class DoubleTermsAggregator extends LongTermsAggregator { public DoubleTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java index f3f87c09dca..e904c667906 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java @@ -40,9 +40,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Objects; -/** - * - */ class InternalOrder extends Terms.Order { private static final byte COUNT_DESC_ID = 1; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java index fd7296d07d9..cbb2ef6378c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java @@ -42,9 +42,6 @@ import java.util.Map; import static java.util.Collections.emptyList; -/** - * - */ public class LongTermsAggregator extends TermsAggregator { protected final ValuesSource.Numeric valuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java index bf8b06ab65a..50869a4709c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java @@ -36,9 +36,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -/** - * - */ public class TermsParser extends AbstractTermsParser { @Override protected TermsAggregationBuilder doCreateFactory(String aggregationName, ValuesSourceType valuesSourceType, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java index 901c52a232d..9f591c7d425 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java @@ -26,9 +26,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public abstract class 
InternalNumericMetricsAggregation extends InternalMetricsAggregation { private static final DocValueFormat DEFAULT_FORMAT = DocValueFormat.RAW; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java index 3ffd7f79750..9b4fc0b256e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java @@ -26,9 +26,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public abstract class NumericMetricsAggregator extends MetricsAggregator { private NumericMetricsAggregator(String name, AggregationContext context, Aggregator parent, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java index eb0fc42f9c2..b33689b9298 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java @@ -38,9 +38,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java index bc6f762295c..49c9e1bb3a0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class AvgParser extends NumericValuesSourceParser { public AvgParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java index 73504f9a8f4..bbd6bf70bc3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java @@ -39,9 +39,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java index f0290e93fa9..355b602b82c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class MaxParser extends NumericValuesSourceParser { public MaxParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java index a32379b7d10..ba28efbf97d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java @@ -39,9 +39,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class MinAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java index 4381ca41899..13dd61f44b6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class MinParser extends NumericValuesSourceParser { public MinParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java index 313b8e7b7c0..7a057cfa962 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java @@ -22,9 +22,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; -/** - * - */ public class PercentileRanksParser extends AbstractPercentilesParser { public static final ParseField VALUES_FIELD = new ParseField("values"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesParser.java index 806fb26cd3f..237c66f28ce 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesParser.java @@ -22,9 +22,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; -/** - * - */ public class PercentilesParser extends AbstractPercentilesParser { public static final ParseField PERCENTS_FIELD = new ParseField("percents"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java index faa6039f56c..eaebf833596 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentileRanksAggregator.java @@ -30,9 +30,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class HDRPercentileRanksAggregator extends 
AbstractHDRPercentilesAggregator { public HDRPercentileRanksAggregator(String name, Numeric valuesSource, AggregationContext context, Aggregator parent, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java index 0b9c2c43d34..f197b533e55 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregator.java @@ -30,9 +30,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class HDRPercentilesAggregator extends AbstractHDRPercentilesAggregator { public HDRPercentilesAggregator(String name, Numeric valuesSource, AggregationContext context, Aggregator parent, double[] percents, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java index 1151e2272a4..34bb4e26d55 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentileRanksAggregator.java @@ -29,9 +29,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class TDigestPercentileRanksAggregator extends AbstractTDigestPercentilesAggregator { public TDigestPercentileRanksAggregator(String name, Numeric valuesSource, AggregationContext context, Aggregator parent, double[] percents, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java index c0063102e07..40478a20d43 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregator.java @@ -29,9 +29,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class TDigestPercentilesAggregator extends AbstractTDigestPercentilesAggregator { public TDigestPercentilesAggregator(String name, Numeric valuesSource, AggregationContext context, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 244881a5155..6e48d844c7d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -26,18 +26,23 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import 
org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.support.AggregationContext; - import java.io.IOException; +import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Function; public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder { @@ -182,10 +187,27 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder @Override protected ScriptedMetricAggregatorFactory doBuild(AggregationContext context, AggregatorFactory parent, Builder subfactoriesBuilder) throws IOException { - return new ScriptedMetricAggregatorFactory(name, type, initScript, mapScript, combineScript, reduceScript, params, context, - parent, subfactoriesBuilder, metaData); + + QueryShardContext queryShardContext = context.searchContext().getQueryShardContext(); + Function, ExecutableScript> executableInitScript; + if (initScript != null) { + executableInitScript = queryShardContext.getLazyExecutableScript(initScript, ScriptContext.Standard.AGGS); + } else { + executableInitScript = (p) -> null; + } + Function, SearchScript> searchMapScript = queryShardContext.getLazySearchScript(mapScript, + ScriptContext.Standard.AGGS); + Function, ExecutableScript> executableCombineScript; + if (combineScript != null) { + executableCombineScript = queryShardContext.getLazyExecutableScript(combineScript, ScriptContext.Standard.AGGS); + } else { + executableCombineScript = (p) -> null; + } + return new ScriptedMetricAggregatorFactory(name, type, searchMapScript, executableInitScript, executableCombineScript, reduceScript, + params, context, parent, subfactoriesBuilder, metaData); } + @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params builderParams) throws IOException { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index e2d3034fa11..57fd49779fc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.scripted; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; @@ -46,21 +47,14 @@ public class ScriptedMetricAggregator extends MetricsAggregator { private final Script reduceScript; private Map params; - protected ScriptedMetricAggregator(String name, Script initScript, Script mapScript, Script combineScript, Script reduceScript, + protected ScriptedMetricAggregator(String name, SearchScript mapScript, ExecutableScript combineScript, + Script reduceScript, Map params, AggregationContext context,
Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); this.params = params; - ScriptService scriptService = context.searchContext().scriptService(); - if (initScript != null) { - scriptService.executable(initScript, ScriptContext.Standard.AGGS, Collections.emptyMap()).run(); - } - this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS, Collections.emptyMap()); - if (combineScript != null) { - this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, Collections.emptyMap()); - } else { - this.combineScript = null; - } + this.mapScript = mapScript; + this.combineScript = combineScript; this.reduceScript = reduceScript; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index f89e99f44b3..7cb74b9ecb7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.aggregations.metrics.scripted; +import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; +import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -34,22 +36,23 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; +import java.util.function.Function; public class ScriptedMetricAggregatorFactory extends AggregatorFactory { - private final Script initScript; - private final Script mapScript; - private final Script combineScript; + private final Function, SearchScript> mapScript; + private final Function, ExecutableScript> combineScript; private final Script reduceScript; private final Map params; + private final Function, ExecutableScript> initScript; - public ScriptedMetricAggregatorFactory(String name, Type type, Script initScript, Script mapScript, Script combineScript, + public ScriptedMetricAggregatorFactory(String name, Type type, Function, SearchScript> mapScript, + Function, ExecutableScript> initScript, Function, ExecutableScript> combineScript, Script reduceScript, Map params, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, type, context, parent, subFactories, metaData); - this.initScript = initScript; this.mapScript = mapScript; + this.initScript = initScript; this.combineScript = combineScript; this.reduceScript = reduceScript; this.params = params; @@ -68,16 +71,18 @@ public class ScriptedMetricAggregatorFactory extends AggregatorFactory(); params.put("_agg", new HashMap()); } - return new ScriptedMetricAggregator(name, insertParams(initScript, params), insertParams(mapScript, params), - insertParams(combineScript, params), deepCopyScript(reduceScript, context.searchContext()), params, context, parent, - pipelineAggregators, metaData); - } - private static Script insertParams(Script script, Map params) { - if (script == null) { - return null; + final ExecutableScript 
initScript = this.initScript.apply(params); + final SearchScript mapScript = this.mapScript.apply(params); + final ExecutableScript combineScript = this.combineScript.apply(params); + + final Script reduceScript = deepCopyScript(this.reduceScript, context.searchContext()); + if (initScript != null) { + initScript.run(); } - return new Script(script.getScript(), script.getType(), script.getLang(), params); + return new ScriptedMetricAggregator(name, mapScript, + combineScript, reduceScript, params, context, parent, + pipelineAggregators, metaData); } private static Script deepCopyScript(Script script, SearchContext context) { @@ -86,7 +91,7 @@ public class ScriptedMetricAggregatorFactory extends AggregatorFactory originalMap = (Map) original; Map clonedMap = new HashMap<>(); - for (Entry e : originalMap.entrySet()) { + for (Map.Entry e : originalMap.entrySet()) { clonedMap.put(deepCopyParams(e.getKey(), context), deepCopyParams(e.getValue(), context)); } clone = (T) clonedMap; } else if (original instanceof List) { List originalList = (List) original; - List clonedList = new ArrayList(); + List clonedList = new ArrayList<>(); for (Object o : originalList) { clonedList.add(deepCopyParams(o, context)); } clone = (T) clonedList; } else if (original instanceof String || original instanceof Integer || original instanceof Long || original instanceof Short - || original instanceof Byte || original instanceof Float || original instanceof Double || original instanceof Character - || original instanceof Boolean) { + || original instanceof Byte || original instanceof Float || original instanceof Double || original instanceof Character + || original instanceof Boolean) { clone = original; } else { throw new SearchParseException(context, - "Can only clone primitives, String, ArrayList, and HashMap. Found: " + original.getClass().getCanonicalName(), null); + "Can only clone primitives, String, ArrayList, and HashMap. 
Found: " + original.getClass().getCanonicalName(), null); } return clone; } + } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java index 374cbcaf0e6..59d205b0522 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java @@ -38,9 +38,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class StatsAggregator extends NumericMetricsAggregator.MultiValue { final ValuesSource.Numeric valuesSource; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java index 60e3d2ef0aa..bca81226125 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class StatsParser extends NumericValuesSourceParser { public StatsParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 7715d4b713e..52b08185ba5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -39,9 +39,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue { public static final ParseField SIGMA_FIELD = new ParseField("sigma"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java index 9644d26e93a..3e61d2ccab6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class ExtendedStatsParser extends NumericValuesSourceParser { public ExtendedStatsParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index 0d6b5de4a80..4dcaa808213 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -37,9 +37,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public class SumAggregator extends NumericMetricsAggregator.SingleValue { final ValuesSource.Numeric valuesSource; diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java index ee82829b0a7..ac2ec65b01e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumParser.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; -/** - * - */ public class SumParser extends NumericValuesSourceParser { public SumParser() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 3547db7140c..1d5ee04d417 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -39,8 +41,10 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -51,6 +55,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.Set; public class TopHitsAggregationBuilder extends AbstractAggregationBuilder { @@ -280,11 +285,9 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder parent, Builder subfactoriesBuilder) throws IOException { - return new TopHitsAggregatorFactory(name, type, from, size, explain, version, trackScores, sorts, highlightBuilder, - storedFieldsContext, fieldDataFields, scriptFields, fetchSourceContext, context, - parent, subfactoriesBuilder, metaData); + List fields = new ArrayList<>(); + if (scriptFields != null) { + for (ScriptField field : scriptFields) { + SearchScript searchScript = context.searchContext().getQueryShardContext().getSearchScript(field.script(), + ScriptContext.Standard.SEARCH); + fields.add(new org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField( + field.fieldName(), searchScript, field.ignoreFailure())); + } + } + + final Optional optionalSort; + if (sorts == null) { + optionalSort = Optional.empty(); + } else { + optionalSort = SortBuilder.buildSort(sorts, 
context.searchContext().getQueryShardContext()); + } + return new TopHitsAggregatorFactory(name, type, from, size, explain, version, trackScores, optionalSort, highlightBuilder, + storedFieldsContext, fieldDataFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java index 07292f1d29f..0b92a0e9bf7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TopScoreDocCollector; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.LongObjectPagedHashMap; @@ -44,14 +45,13 @@ import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.SubSearchContext; +import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.sort.SortAndFormats; import java.io.IOException; import java.util.List; import java.util.Map; -/** - */ public class TopHitsAggregator extends MetricsAggregator { /** Simple wrapper around a top-level collector and the current leaf collector. */ @@ -114,6 +114,11 @@ public class TopHitsAggregator extends MetricsAggregator { if (collectors == null) { SortAndFormats sort = subSearchContext.sort(); int topN = subSearchContext.from() + subSearchContext.size(); + if (sort == null) { + for (RescoreSearchContext rescoreContext : context.searchContext().rescore()) { + topN = Math.max(rescoreContext.window(), topN); + } + } // In the QueryPhase we don't need this protection, because it is build into the IndexSearcher, // but here we create collectors ourselves and we need prevent OOM because of crazy an offset and size. topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc()); @@ -135,9 +140,18 @@ public class TopHitsAggregator extends MetricsAggregator { if (topDocsCollector == null) { topHits = buildEmptyAggregation(); } else { - final TopDocs topDocs = topDocsCollector.topLevelCollector.topDocs(); - - subSearchContext.queryResult().topDocs(topDocs, subSearchContext.sort() == null ? null : subSearchContext.sort().formats); + TopDocs topDocs = topDocsCollector.topLevelCollector.topDocs(); + if (subSearchContext.sort() == null) { + for (RescoreSearchContext ctx : context().searchContext().rescore()) { + try { + topDocs = ctx.rescorer().rescore(topDocs, context.searchContext(), ctx); + } catch (IOException e) { + throw new ElasticsearchException("Rescore TopHits Failed", e); + } + } + } + subSearchContext.queryResult().topDocs(topDocs, + subSearchContext.sort() == null ? 
null : subSearchContext.sort().formats); int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; for (int i = 0; i < topDocs.scoreDocs.length; i++) { docIdsToLoad[i] = topDocs.scoreDocs[i].doc; @@ -157,7 +171,7 @@ public class TopHitsAggregator extends MetricsAggregator { } } topHits = new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits(), pipelineAggregators(), - metaData()); + metaData()); } return topHits; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java index 7c6a743a20b..9c9e94bc9fa 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java @@ -19,29 +19,23 @@ package org.elasticsearch.search.aggregations.metrics.tophits; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.internal.SubSearchContext; import org.elasticsearch.search.sort.SortAndFormats; -import org.elasticsearch.search.sort.SortBuilder; - import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; public class TopHitsAggregatorFactory extends AggregatorFactory { @@ -50,25 +44,25 @@ public class TopHitsAggregatorFactory extends AggregatorFactory> sorts; + private final Optional sort; private final HighlightBuilder highlightBuilder; private final StoredFieldsContext storedFieldsContext; private final List docValueFields; - private final Set scriptFields; + private final List scriptFields; private final FetchSourceContext fetchSourceContext; public TopHitsAggregatorFactory(String name, Type type, int from, int size, boolean explain, boolean version, boolean trackScores, - List> sorts, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, - List docValueFields, Set scriptFields, FetchSourceContext fetchSourceContext, - AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, - Map metaData) throws IOException { + Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, + List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, + AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) + throws IOException { super(name, type, context, parent, subFactories, metaData); this.from = from; this.size = size; 
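Worth pausing on the TopHitsAggregator hunk above: when no explicit sort is set, the collector is now sized to at least the largest rescore window before being capped at maxDoc, since a rescorer can only reorder documents that were actually collected. A sketch of that sizing rule as a standalone helper (hypothetical names, not code from this change):

// Hypothetical helper mirroring the topN sizing added above.
final class TopNSizing {
    static int collectorSize(int from, int size, int[] rescoreWindows, boolean sorted, int maxDoc) {
        int topN = from + size;
        if (!sorted) {
            for (int window : rescoreWindows) {
                // a rescorer reorders its whole window, so collect at least that many hits
                topN = Math.max(window, topN);
            }
        }
        // cap to the reader size to avoid allocating a collector for absurd from/size values
        return Math.min(topN, maxDoc);
    }
}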
this.explain = explain; this.version = version; this.trackScores = trackScores; - this.sorts = sorts; + this.sort = sort; this.highlightBuilder = highlightBuilder; this.storedFieldsContext = storedFieldsContext; this.docValueFields = docValueFields; @@ -86,11 +80,8 @@ public class TopHitsAggregatorFactory extends AggregatorFactory optionalSort = SortBuilder.buildSort(sorts, subSearchContext.getQueryShardContext()); - if (optionalSort.isPresent()) { - subSearchContext.sort(optionalSort.get()); - } + if (sort.isPresent()) { + subSearchContext.sort(sort.get()); } if (storedFieldsContext != null) { subSearchContext.storedFieldsContext(storedFieldsContext); @@ -98,14 +89,9 @@ public class TopHitsAggregatorFactory extends AggregatorFactory map = parser.map(); @@ -223,7 +222,7 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr } if (script == null) { - throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + ScriptField.SCRIPT.getPreferredName() + throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + Script.SCRIPT_PARSE_FIELD.getPreferredName() + "] for series_arithmetic aggregation [" + reducerName + "]"); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java index e3b42376728..877be6ea54f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketselector/BucketSelectorPipelineAggregationBuilder.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -119,7 +118,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { builder.field(BUCKETS_PATH.getPreferredName(), bucketsPathsMap); - builder.field(ScriptField.SCRIPT.getPreferredName(), script); + builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName(), script); builder.field(GAP_POLICY.getPreferredName(), gapPolicy.getName()); return builder; } @@ -141,7 +140,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg bucketsPathsMap.put("_value", parser.text()); } else if (context.getParseFieldMatcher().match(currentFieldName, GAP_POLICY)) { gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + } else if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else { throw new ParsingException(parser.getTokenLocation(), @@ -163,7 +162,7 @@ public class BucketSelectorPipelineAggregationBuilder extends 
AbstractPipelineAg "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else if (context.getParseFieldMatcher().match(currentFieldName, BUCKETS_PATH)) { Map map = parser.map(); @@ -186,7 +185,7 @@ public class BucketSelectorPipelineAggregationBuilder extends AbstractPipelineAg } if (script == null) { - throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + ScriptField.SCRIPT.getPreferredName() + throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + Script.SCRIPT_PARSE_FIELD.getPreferredName() + "] for bucket_selector aggregation [" + reducerName + "]"); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java index 5ffc77669b8..ded2d110206 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregationBuilder.java @@ -32,6 +32,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; @@ -141,7 +142,7 @@ public class DerivativePipelineAggregationBuilder extends AbstractPipelineAggreg } Long xAxisUnits = null; if (units != null) { - DateTimeUnit dateTimeUnit = DateHistogramAggregatorFactory.DATE_FIELD_UNITS.get(units); + DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(units); if (dateTimeUnit != null) { xAxisUnits = dateTimeUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java index 57eea9ccf65..7e8c5c1b271 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AbstractValuesSourceParser.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.search.aggregations.Aggregator; import org.joda.time.DateTimeZone; @@ -32,9 +31,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; 
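A theme worth noting in the AggregationContext hunk above: "now" is resolved through context.getQueryShardContext()::nowInMillis, a method reference passed as a supplier, rather than through global state such as SearchContext.current(). A minimal sketch of that idea, with illustrative names that are not the Elasticsearch API:

// Sketch: the caller decides what "now" means (per-request, frozen for caching);
// the parser never reaches for a thread-local or static context.
import java.util.function.LongSupplier;

final class DateParserSketch {
    static long resolve(String value, LongSupplier nowInMillis) {
        return "now".equals(value) ? nowInMillis.getAsLong() : Long.parseLong(value);
    }
}

A caller would pass its own clock, e.g. DateParserSketch.resolve("now", shardContext::nowInMillis), which keeps the parsing code testable and free of hidden global lookups.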
-/** - * - */ public abstract class AbstractValuesSourceParser implements Aggregator.Parser { static final ParseField TIME_ZONE = new ParseField("time_zone"); @@ -136,7 +132,7 @@ public abstract class AbstractValuesSourceParser "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } } else if (scriptable && token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { script = Script.parse(parser, context.getParseFieldMatcher(), context.getDefaultScriptLanguage()); } else if (!token(aggregationName, currentFieldName, token, parserContext, otherOptions)) { throw new ParsingException(parser.getTokenLocation(), diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java index 79549f87392..64435b6df5b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java @@ -97,7 +97,7 @@ public class AggregationContext { } else { if (config.fieldContext() != null && config.fieldContext().fieldType() != null) { missing = config.fieldContext().fieldType().docValueFormat(null, DateTimeZone.UTC) - .parseDouble(config.missing().toString(), false, context::nowInMillis); + .parseDouble(config.missing().toString(), false, context.getQueryShardContext()::nowInMillis); } else { missing = Double.parseDouble(config.missing().toString()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java index fd2f3636d17..5d26cc1b359 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java @@ -30,9 +30,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import java.io.IOException; import java.util.Map; -/** - * - */ public class GeoPointParser { private final InternalAggregation.Type aggType; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index 9e0bf350beb..7bcfce5a2f7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -31,9 +31,6 @@ import org.joda.time.DateTimeZone; import java.io.IOException; -/** - * - */ public enum ValueType implements Writeable { STRING((byte) 1, "string", "string", ValuesSourceType.BYTES, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 8f14a1ffaf9..1c06296f38a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -43,9 +43,6 @@ import java.io.IOException; import java.util.Collections; import java.util.Objects; -/** - * - */ public abstract class 
ValuesSourceAggregationBuilder> extends AbstractAggregationBuilder { @@ -376,8 +373,11 @@ public abstract class ValuesSourceAggregationBuilder { private final ValuesSourceType valueSourceType; diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c64a5fd552e..34fb29305bd 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -106,6 +106,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ public static final ParseField PROFILE_FIELD = new ParseField("profile"); public static final ParseField SEARCH_AFTER = new ParseField("search_after"); public static final ParseField SLICE = new ParseField("slice"); + public static final ParseField ALL_FIELDS_FIELDS = new ParseField("all_fields"); public static SearchSourceBuilder fromXContent(QueryParseContext context, AggregatorParsers aggParsers, Suggesters suggesters, SearchExtRegistry searchExtRegistry) throws IOException { @@ -637,11 +638,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * every hit */ public SearchSourceBuilder fetchSource(boolean fetch) { - if (this.fetchSourceContext == null) { - this.fetchSourceContext = new FetchSourceContext(fetch); - } else { - this.fetchSourceContext.fetchSource(fetch); - } + FetchSourceContext fetchSourceContext = this.fetchSourceContext != null ? this.fetchSourceContext + : FetchSourceContext.FETCH_SOURCE; + this.fetchSourceContext = new FetchSourceContext(fetch, fetchSourceContext.includes(), fetchSourceContext.excludes()); return this; } @@ -675,7 +674,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * filter the returned _source */ public SearchSourceBuilder fetchSource(@Nullable String[] includes, @Nullable String[] excludes) { - fetchSourceContext = new FetchSourceContext(includes, excludes); + FetchSourceContext fetchSourceContext = this.fetchSourceContext != null ? this.fetchSourceContext + : FetchSourceContext.FETCH_SOURCE; + this.fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), includes, excludes); return this; } @@ -878,7 +879,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * infinitely. 
*/ public SearchSourceBuilder rewrite(QueryShardContext context) throws IOException { - assert (this.equals(shallowCopy(queryBuilder, postQueryBuilder))); + assert (this.equals(shallowCopy(queryBuilder, postQueryBuilder, sliceBuilder))); QueryBuilder queryBuilder = null; if (this.queryBuilder != null) { queryBuilder = this.queryBuilder.rewrite(context); @@ -889,40 +890,51 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } boolean rewritten = queryBuilder != this.queryBuilder || postQueryBuilder != this.postQueryBuilder; if (rewritten) { - return shallowCopy(queryBuilder, postQueryBuilder); + return shallowCopy(queryBuilder, postQueryBuilder, sliceBuilder); } return this; } - private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder postQueryBuilder) { - SearchSourceBuilder rewrittenBuilder = new SearchSourceBuilder(); - rewrittenBuilder.aggregations = aggregations; - rewrittenBuilder.explain = explain; - rewrittenBuilder.extBuilders = extBuilders; - rewrittenBuilder.fetchSourceContext = fetchSourceContext; - rewrittenBuilder.docValueFields = docValueFields; - rewrittenBuilder.storedFieldsContext = storedFieldsContext; - rewrittenBuilder.from = from; - rewrittenBuilder.highlightBuilder = highlightBuilder; - rewrittenBuilder.indexBoost = indexBoost; - rewrittenBuilder.minScore = minScore; - rewrittenBuilder.postQueryBuilder = postQueryBuilder; - rewrittenBuilder.profile = profile; - rewrittenBuilder.queryBuilder = queryBuilder; - rewrittenBuilder.rescoreBuilders = rescoreBuilders; - rewrittenBuilder.scriptFields = scriptFields; - rewrittenBuilder.searchAfterBuilder = searchAfterBuilder; - rewrittenBuilder.sliceBuilder = sliceBuilder; - rewrittenBuilder.size = size; - rewrittenBuilder.sorts = sorts; - rewrittenBuilder.stats = stats; - rewrittenBuilder.suggestBuilder = suggestBuilder; - rewrittenBuilder.terminateAfter = terminateAfter; - rewrittenBuilder.timeout = timeout; - rewrittenBuilder.trackScores = trackScores; - rewrittenBuilder.version = version; - return rewrittenBuilder; - } + /** + * Create a shallow copy of this builder with a new slice configuration. + */ + public SearchSourceBuilder copyWithNewSlice(SliceBuilder slice) { + return shallowCopy(queryBuilder, postQueryBuilder, slice); + } + + /** + * Create a shallow copy of this source with {@link #queryBuilder}, {@link #postQueryBuilder}, and {@code slice} replaced. Used by + * {@link #rewrite(QueryShardContext)} and {@link #copyWithNewSlice(SliceBuilder)}. 
+ */ + private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder postQueryBuilder, SliceBuilder slice) { + SearchSourceBuilder rewrittenBuilder = new SearchSourceBuilder(); + rewrittenBuilder.aggregations = aggregations; + rewrittenBuilder.explain = explain; + rewrittenBuilder.extBuilders = extBuilders; + rewrittenBuilder.fetchSourceContext = fetchSourceContext; + rewrittenBuilder.docValueFields = docValueFields; + rewrittenBuilder.storedFieldsContext = storedFieldsContext; + rewrittenBuilder.from = from; + rewrittenBuilder.highlightBuilder = highlightBuilder; + rewrittenBuilder.indexBoost = indexBoost; + rewrittenBuilder.minScore = minScore; + rewrittenBuilder.postQueryBuilder = postQueryBuilder; + rewrittenBuilder.profile = profile; + rewrittenBuilder.queryBuilder = queryBuilder; + rewrittenBuilder.rescoreBuilders = rescoreBuilders; + rewrittenBuilder.scriptFields = scriptFields; + rewrittenBuilder.searchAfterBuilder = searchAfterBuilder; + rewrittenBuilder.sliceBuilder = slice; + rewrittenBuilder.size = size; + rewrittenBuilder.sorts = sorts; + rewrittenBuilder.stats = stats; + rewrittenBuilder.suggestBuilder = suggestBuilder; + rewrittenBuilder.terminateAfter = terminateAfter; + rewrittenBuilder.timeout = timeout; + rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.version = version; + return rewrittenBuilder; + } /** * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java index 1c764e28346..9235a6359ff 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilderException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class SearchSourceBuilderException extends ElasticsearchException { public SearchSourceBuilderException(String msg) { diff --git a/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index 1359be24a15..6be95a8bceb 100644 --- a/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/core/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -28,9 +28,11 @@ import org.apache.lucene.index.TermContext; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.elasticsearch.common.collect.HppcMaps; +import org.elasticsearch.search.SearchContextException; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.rescore.RescoreSearchContext; +import org.elasticsearch.tasks.TaskCancelledException; import java.util.AbstractSet; import java.util.Collection; @@ -59,6 +61,9 @@ public class DfsPhase implements SearchPhase { TermStatistics[] termStatistics = new TermStatistics[terms.length]; IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext(); for (int i = 0; i < terms.length; i++) { + if(context.isCancelled()) { + throw new TaskCancelledException("cancelled"); + } // LUCENE 4 UPGRADE: cache TermContext? 
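The DfsPhase hunks above add low-level cancellation probes: the hot loop checks the context's cancelled flag and bails out with a TaskCancelledException. A self-contained sketch of that pattern follows; CancellableContext and CancelledException are hypothetical stand-ins for the ES classes, not the real API.

```java
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

// Stand-in for TaskCancelledException.
class CancelledException extends RuntimeException {
    CancelledException(String msg) { super(msg); }
}

// Stand-in for the flag on SearchContext that task cancellation flips.
class CancellableContext {
    private final AtomicBoolean cancelled = new AtomicBoolean();
    void cancel() { cancelled.set(true); }
    boolean isCancelled() { return cancelled.get(); }
}

class DfsLoopSketch {
    // Probe once per term, mirroring the check the patch inserts before each
    // term-statistics lookup, so a cancelled search stops within one iteration.
    static void collectStats(CancellableContext context, List<String> terms) {
        for (String term : terms) {
            if (context.isCancelled()) {
                throw new CancelledException("cancelled");
            }
            // ... build the TermContext and collect statistics for `term` ...
        }
    }
}
```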
TermContext termContext = TermContext.build(indexReaderContext, terms[i]); termStatistics[i] = context.searcher().termStatistics(terms[i], termContext); @@ -70,6 +75,9 @@ public class DfsPhase implements SearchPhase { if (!fieldStatistics.containsKey(term.field())) { final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field()); fieldStatistics.put(term.field(), collectionStatistics); + if(context.isCancelled()) { + throw new TaskCancelledException("cancelled"); + } } } diff --git a/core/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java b/core/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java index a6020b4498a..f493bb4d052 100644 --- a/core/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java +++ b/core/src/main/java/org/elasticsearch/search/dfs/DfsPhaseExecutionException.java @@ -25,16 +25,17 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -/** - * - */ public class DfsPhaseExecutionException extends SearchContextException { public DfsPhaseExecutionException(SearchContext context, String msg, Throwable t) { super(context, "Dfs Failed [" + msg + "]", t); } + public DfsPhaseExecutionException(SearchContext context, String msg) { + super(context, "Dfs Failed [" + msg + "]"); + } + public DfsPhaseExecutionException(StreamInput in) throws IOException { super(in); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index 6e93e410587..9a5412ffbd3 100644 --- a/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -34,9 +34,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -/** - * - */ public class DfsSearchResult extends TransportResponse implements SearchPhaseResult { private static final Term[] EMPTY_TERMS = new Term[0]; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 41ea0e294da..dcf55872e35 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -51,6 +51,7 @@ import org.elasticsearch.search.internal.InternalSearchHitField; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.tasks.TaskCancelledException; import java.io.IOException; import java.util.ArrayList; @@ -99,11 +100,9 @@ public class FetchPhase implements SearchPhase { } else { for (String fieldName : context.storedFieldsContext().fieldNames()) { if (fieldName.equals(SourceFieldMapper.NAME)) { - if (context.hasFetchSourceContext()) { - context.fetchSourceContext().fetchSource(true); - } else { - context.fetchSourceContext(new FetchSourceContext(true)); - } + FetchSourceContext fetchSourceContext = context.hasFetchSourceContext() ? 
context.fetchSourceContext() + : FetchSourceContext.FETCH_SOURCE; + context.fetchSourceContext(new FetchSourceContext(true, fetchSourceContext.includes(), fetchSourceContext.excludes())); continue; } if (Regex.isSimpleMatchPattern(fieldName)) { @@ -138,6 +137,9 @@ public class FetchPhase implements SearchPhase { InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()]; FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); for (int index = 0; index < context.docIdsToLoadSize(); index++) { + if(context.isCancelled()) { + throw new TaskCancelledException("cancelled"); + } int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index]; int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves()); LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java index 1f56ac87142..e3fb542134e 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhaseExecutionException.java @@ -25,15 +25,16 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -/** - * - */ public class FetchPhaseExecutionException extends SearchContextException { public FetchPhaseExecutionException(SearchContext context, String msg, Throwable t) { super(context, "Fetch Failed [" + msg + "]", t); } + public FetchPhaseExecutionException(SearchContext context, String msg) { + super(context, "Fetch Failed [" + msg + "]"); + } + public FetchPhaseExecutionException(StreamInput in) throws IOException { super(in); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index eac878569e1..27dda007911 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -28,9 +28,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -/** - * - */ public class FetchSearchResult extends TransportResponse implements FetchSearchResultProvider { private long id; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java index 5f4b8101298..4b1aff991c6 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResultProvider.java @@ -21,9 +21,6 @@ package org.elasticsearch.search.fetch; import org.elasticsearch.search.SearchPhaseResult; -/** - * - */ public interface FetchSearchResultProvider extends SearchPhaseResult { FetchSearchResult fetchResult(); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java index f3271f933fe..b618eacdb6b 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java @@ -30,9 +30,6 @@ import java.io.IOException; import static org.elasticsearch.search.fetch.FetchSearchResult.readFetchSearchResult; import static 
org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; -/** - * - */ public class QueryFetchSearchResult extends QuerySearchResultProvider implements FetchSearchResultProvider { private QuerySearchResult queryResult; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index dbaee5b64bb..e8a9af00127 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -28,9 +28,6 @@ import java.io.IOException; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; -/** - * - */ public class ScrollQueryFetchSearchResult extends TransportResponse { private QueryFetchSearchResult result; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java b/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java index 4087eb9a01c..2148da57d39 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java @@ -22,9 +22,12 @@ package org.elasticsearch.search.fetch; import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; @@ -106,4 +109,9 @@ public class ShardFetchRequest extends TransportRequest { Lucene.writeScoreDoc(out, lastEmittedDoc); } } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new SearchTask(id, type, action, getDescription(), parentTaskId); + } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java index 212f8d724d8..1eec405502e 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java @@ -30,12 +30,15 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestRequest; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; +import java.util.function.Function; /** * Context used to fetch the {@code _source}. 
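ShardFetchRequest above now overrides createTask so that the shard-level fetch is registered as a SearchTask and can be cancelled along with the rest of the search. A minimal sketch of the override pattern; Task, TaskId, and SearchTask below are simplified stand-ins, not the real classes from org.elasticsearch.tasks.

```java
// Simplified stand-ins for the ES task classes.
class TaskId {
    final long id;
    TaskId(long id) { this.id = id; }
}

class Task {
    final long id;
    final String type;
    final String action;
    final String description;
    final TaskId parentTaskId;

    Task(long id, String type, String action, String description, TaskId parentTaskId) {
        this.id = id;
        this.type = type;
        this.action = action;
        this.description = description;
        this.parentTaskId = parentTaskId;
    }
}

class SearchTask extends Task {
    SearchTask(long id, String type, String action, String description, TaskId parentTaskId) {
        super(id, type, action, description, parentTaskId);
    }
}

// A transport request opts into search-task tracking by returning a SearchTask,
// the same shape as the createTask override added in the diff above.
class FetchRequestSketch {
    String getDescription() { return "shard fetch"; }

    public Task createTask(long id, String type, String action, TaskId parentTaskId) {
        return new SearchTask(id, type, action, getDescription(), parentTaskId);
    }
}
```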
@@ -47,39 +50,13 @@ public class FetchSourceContext implements Writeable, ToXContent { public static final FetchSourceContext FETCH_SOURCE = new FetchSourceContext(true); public static final FetchSourceContext DO_NOT_FETCH_SOURCE = new FetchSourceContext(false); - private boolean fetchSource; - private String[] includes; - private String[] excludes; + private final boolean fetchSource; + private final String[] includes; + private final String[] excludes; + private Function, Map> filter; public static FetchSourceContext parse(XContentParser parser) throws IOException { - FetchSourceContext fetchSourceContext = new FetchSourceContext(); - fetchSourceContext.fromXContent(parser, ParseFieldMatcher.STRICT); - return fetchSourceContext; - } - - public FetchSourceContext() { - } - - public FetchSourceContext(boolean fetchSource) { - this(fetchSource, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY); - } - - public FetchSourceContext(String include) { - this(include, null); - } - - public FetchSourceContext(String include, String exclude) { - this(true, - include == null ? Strings.EMPTY_ARRAY : new String[]{include}, - exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); - } - - public FetchSourceContext(String[] includes) { - this(true, includes, Strings.EMPTY_ARRAY); - } - - public FetchSourceContext(String[] includes, String[] excludes) { - this(true, includes, excludes); + return fromXContent(parser, ParseFieldMatcher.STRICT); } public FetchSourceContext(boolean fetchSource, String[] includes, String[] excludes) { @@ -88,6 +65,10 @@ public class FetchSourceContext implements Writeable, ToXContent { this.excludes = excludes == null ? Strings.EMPTY_ARRAY : excludes; } + public FetchSourceContext(boolean fetchSource) { + this(fetchSource, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY); + } + public FetchSourceContext(StreamInput in) throws IOException { fetchSource = in.readBoolean(); includes = in.readStringArray(); @@ -105,29 +86,14 @@ public class FetchSourceContext implements Writeable, ToXContent { return this.fetchSource; } - public FetchSourceContext fetchSource(boolean fetchSource) { - this.fetchSource = fetchSource; - return this; - } - public String[] includes() { return this.includes; } - public FetchSourceContext includes(String[] includes) { - this.includes = includes; - return this; - } - public String[] excludes() { return this.excludes; } - public FetchSourceContext excludes(String[] excludes) { - this.excludes = excludes; - return this; - } - public static FetchSourceContext parseFromRestRequest(RestRequest request) { Boolean fetchSource = null; String[] source_excludes = null; @@ -161,7 +127,7 @@ public class FetchSourceContext implements Writeable, ToXContent { return null; } - public void fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { + public static FetchSourceContext fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { XContentParser.Token token = parser.currentToken(); boolean fetchSource = true; String[] includes = Strings.EMPTY_ARRAY; @@ -226,9 +192,7 @@ public class FetchSourceContext implements Writeable, ToXContent { throw new ParsingException(parser.getTokenLocation(), "Expected one of [" + XContentParser.Token.VALUE_BOOLEAN + ", " + XContentParser.Token.START_OBJECT + "] but found [" + token + "]", parser.getTokenLocation()); } - this.fetchSource = fetchSource; - this.includes = includes; - this.excludes = excludes; + return new FetchSourceContext(fetchSource, includes, excludes); 
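+ // Note: with the mutating setters removed, parsing is side-effect free; callers receive a fresh immutable FetchSourceContext instead of mutating a shared instance.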
} @Override @@ -265,4 +229,15 @@ public class FetchSourceContext implements Writeable, ToXContent { result = 31 * result + (excludes != null ? Arrays.hashCode(excludes) : 0); return result; } + + /** + * Returns a filter function that expects the source map as an input and returns + * the filtered map. + */ + public Function, Map> getFilter() { + if (filter == null) { + filter = XContentMapValues.filter(includes, excludes); + } + return filter; + } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index fe5a9f286c1..3171ca4b008 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -48,7 +48,7 @@ public final class FetchSourceSubPhase implements FetchSubPhase { "for index [" + context.indexShard().shardId().getIndexName() + "]"); } - Object value = source.filter(fetchSourceContext.includes(), fetchSourceContext.excludes()); + final Object value = source.filter(fetchSourceContext); try { final int initialCapacity = Math.min(1024, source.internalSourceRef().length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index 44a6b13fd40..d3de22f7203 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -59,8 +59,6 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; -/** - */ public final class InnerHitsContext { private final Map innerHits; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java index 56223b1ec46..18eacffdc7c 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java @@ -92,7 +92,7 @@ public final class MatchedQueriesFetchSubPhase implements FetchSubPhase { } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } finally { - SearchContext.current().clearReleasables(Lifetime.COLLECTION); + context.clearReleasables(Lifetime.COLLECTION); } } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsContext.java index c886a3a157f..79bacd7f938 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsContext.java @@ -24,9 +24,6 @@ import org.elasticsearch.script.SearchScript; import java.util.ArrayList; import java.util.List; -/** - * - */ public class ScriptFieldsContext { public static class ScriptField { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index 873567de44e..ddafda2f5a5 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java 
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -43,9 +43,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -/** - * - */ public class FastVectorHighlighter implements Highlighter { private static final SimpleBoundaryScanner DEFAULT_BOUNDARY_SCANNER = new SimpleBoundaryScanner(); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterContext.java index 7b9526d152f..8643ccb82ea 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterContext.java @@ -23,9 +23,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; -/** - * - */ public class HighlighterContext { public final String fieldName; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index 631d716f6f7..e821b0fd9a8 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -47,9 +47,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -/** - * - */ public class PlainHighlighter implements Highlighter { private static final String CACHE_KEY = "highlight-plain"; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java index 9f2074d7412..d4731718793 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SearchContextHighlight.java @@ -28,9 +28,6 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; -/** - * - */ public class SearchContextHighlight { private final Map fields; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java index e46cda49290..c4ac54606cb 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java @@ -33,9 +33,6 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; import java.util.List; -/** - * - */ public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder { private final FieldMapper mapper; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java index 4ff52547c7d..cd37863a67e 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java @@ -30,9 +30,6 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; import java.util.List; -/** - * - */ public class SourceSimpleFragmentsBuilder extends SimpleFragmentsBuilder { private final SearchContext searchContext; diff --git a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java new file mode 100644 index 00000000000..9d22729b7a0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Represents a {@link QueryBuilder} and the list of alias names that the filter is composed of. + */ +public final class AliasFilter implements Writeable { + public static final Version V_5_1_0 = Version.fromId(5010099); + private final String[] aliases; + private final QueryBuilder filter; + private final boolean reparseAliases; + + public AliasFilter(QueryBuilder filter, String... aliases) { + this.aliases = aliases == null ? Strings.EMPTY_ARRAY : aliases; + this.filter = filter; + reparseAliases = false; // no bwc here - we only do this if we parse the filter + } + + public AliasFilter(StreamInput input) throws IOException { + aliases = input.readStringArray(); + if (input.getVersion().onOrAfter(V_5_1_0)) { + filter = input.readOptionalNamedWriteable(QueryBuilder.class); + reparseAliases = false; + } else { + reparseAliases = true; // alright we read from 5.0 + filter = null; + } + } + + private QueryBuilder reparseFilter(QueryRewriteContext context) { + if (reparseAliases) { + // we are processing a filter received from a 5.0 node - we need to reparse this on the executing node + final IndexMetaData indexMetaData = context.getIndexSettings().getIndexMetaData(); + return ShardSearchRequest.parseAliasFilter(context::newParseContext, indexMetaData, aliases); + } + return filter; + } + + AliasFilter rewrite(QueryRewriteContext context) throws IOException { + QueryBuilder queryBuilder = reparseFilter(context); + if (queryBuilder != null) { + return new AliasFilter(QueryBuilder.rewriteQuery(queryBuilder, context), aliases); + } + return new AliasFilter(filter, aliases); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(aliases); + if (out.getVersion().onOrAfter(V_5_1_0)) { + out.writeOptionalNamedWriteable(filter); + } + } + + /** + * Returns the alias patterns that are used to compose the {@link QueryBuilder} + * returned from {@link #getQueryBuilder()}. + */ + public String[] getAliases() { + return aliases; + } + + /** + * Returns the alias filter {@link QueryBuilder} or null if there is no such filter. + */ + public QueryBuilder getQueryBuilder() { + if (reparseAliases) { + // this is only for BWC since 5.0 still only sends aliases so this must be rewritten on the executing node + // if we talk to an older node we also only forward/write the string array which is compatible with the consumers + // in 5.0 see ExplainRequest and QueryValidationRequest + throw new IllegalStateException("alias filter for aliases: " + Arrays.toString(aliases) + " must be rewritten first"); + } + return filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AliasFilter that = (AliasFilter) o; + return reparseAliases == that.reparseAliases && + Arrays.equals(aliases, that.aliases) && + Objects.equals(filter, that.filter); + } + + @Override + public int hashCode() { + return Objects.hash(aliases, filter, reparseAliases); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 5084f9ecad4..ec5cbf145d3 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.unit.TimeValue; @@ -153,11 +154,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.getOriginNanoTime(); } - @Override - protected long nowInMillisImpl() { - return in.nowInMillisImpl(); - } - 
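AliasFilter.reparseFilter above delegates to ShardSearchRequest.parseAliasFilter, which appears later in this patch: it resolves each alias to its filter and combines several filters as should clauses of a bool query (an OR). A small sketch of just that combination rule, with strings standing in for QueryBuilder:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class AliasFilterCombineSketch {
    // One alias contributes its filter directly; several aliases become an OR.
    // Returns null when no filtering applies. Strings stand in for QueryBuilder.
    static String combine(Map<String, String> filtersByAlias, String... aliasNames) {
        if (aliasNames == null || aliasNames.length == 0) {
            return null; // no aliases, no filtering
        }
        if (aliasNames.length == 1) {
            return filtersByAlias.get(aliasNames[0]);
        }
        List<String> should = new ArrayList<>();
        for (String name : aliasNames) {
            String filter = filtersByAlias.get(name);
            if (filter == null) {
                // An alias without a filter matches everything, so the union is
                // unrestricted and the combined filter collapses to "no filter".
                return null;
            }
            should.add(filter);
        }
        return "bool { should: " + should + " }";
    }
}
```

Collapsing to null when any listed alias is unfiltered mirrors the real method, which returns null in that case rather than building a partial bool query.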
@Override public ScrollContext scrollContext() { return in.scrollContext(); @@ -263,11 +259,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.similarityService(); } - @Override - public ScriptService scriptService() { - return in.scriptService(); - } - @Override public BigArrays bigArrays() { return in.bigArrays(); @@ -303,6 +294,11 @@ public abstract class FilteredSearchContext extends SearchContext { in.terminateAfter(terminateAfter); } + @Override + public boolean lowLevelCancellation() { + return in.lowLevelCancellation(); + } + @Override public SearchContext minimumScore(float minimumScore) { return in.minimumScore(minimumScore); @@ -526,4 +522,19 @@ public abstract class FilteredSearchContext extends SearchContext { public QueryShardContext getQueryShardContext() { return in.getQueryShardContext(); } + + @Override + public void setTask(SearchTask task) { + in.setTask(task); + } + + @Override + public SearchTask getTask() { + return in.getTask(); + } + + @Override + public boolean isCancelled() { + return in.isCancelled(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java index 82a74de73d2..742269dba1b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java @@ -20,9 +20,12 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.Scroll; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; @@ -67,4 +70,9 @@ public class InternalScrollSearchRequest extends TransportRequest { out.writeLong(id); out.writeOptionalWriteable(scroll); } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new SearchTask(id, type, action, getDescription(), parentTaskId); + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index 227fe90ee63..e37446ba8ce 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -57,9 +57,6 @@ import static org.elasticsearch.common.lucene.Lucene.writeExplanation; import static org.elasticsearch.search.fetch.subphase.highlight.HighlightField.readHighlightField; import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField; -/** - * - */ public class InternalSearchHit implements SearchHit { private static final Object[] EMPTY_SORT_VALUES = new Object[0]; @@ -203,6 +200,10 @@ public class InternalSearchHit implements SearchHit { */ @Override public BytesReference sourceRef() { + if (this.source == null) { + return null; + } + try { this.source = CompressorFactory.uncompressIfNeeded(this.source); return this.source; @@ -248,7 +249,7 @@ public class InternalSearchHit implements SearchHit { @Override public boolean hasSource() { - return source == null; + return source != null; } @Override diff --git 
a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java index 114aa4999d1..9214127ff06 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHitField.java @@ -29,9 +29,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -/** - * - */ public class InternalSearchHitField implements SearchHitField { private String name; diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 09a787ac3cb..871d176ffcd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -38,9 +38,6 @@ import java.util.Map; import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits; -/** - * - */ public class InternalSearchResponse implements Streamable, ToXContent { public static InternalSearchResponse empty() { diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 63a1995b08f..ce845a84169 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -42,7 +43,6 @@ import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; @@ -81,21 +81,7 @@ import java.util.concurrent.atomic.AtomicBoolean; // For reference why we use RefCounted here see #20095 public abstract class SearchContext extends AbstractRefCounted implements Releasable { - private static ThreadLocal current = new ThreadLocal<>(); public static final int DEFAULT_TERMINATE_AFTER = 0; - - public static void setCurrent(SearchContext value) { - current.set(value); - } - - public static void removeCurrent() { - current.remove(); - } - - public static SearchContext current() { - return current.get(); - } - private Map> clearables = null; private final AtomicBoolean closed = new AtomicBoolean(false); private InnerHitsContext innerHitsContext; @@ -111,6 +97,12 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas return parseFieldMatcher; } + public abstract void setTask(SearchTask task); + + public abstract SearchTask getTask(); + + public abstract boolean isCancelled(); + @Override public final void close() { if (closed.compareAndSet(false, true)) { // prevent double closing @@ -118,8 +110,6 @@ public abstract class SearchContext extends AbstractRefCounted implements 
Releas } } - private boolean nowInMillisUsed; - @Override protected final void closeInternal() { try { @@ -162,21 +152,6 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract long getOriginNanoTime(); - public final long nowInMillis() { - nowInMillisUsed = true; - return nowInMillisImpl(); - } - - public final boolean nowInMillisUsed() { - return nowInMillisUsed; - } - - public final void resetNowInMillisUsed() { - this.nowInMillisUsed = false; - } - - protected abstract long nowInMillisImpl(); - public abstract ScrollContext scrollContext(); public abstract SearchContext scrollContext(ScrollContext scroll); @@ -238,8 +213,6 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract SimilarityService similarityService(); - public abstract ScriptService scriptService(); - public abstract BigArrays bigArrays(); public abstract BitsetFilterCache bitsetFilterCache(); @@ -254,6 +227,14 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract void terminateAfter(int terminateAfter); + /** + * Indicates whether the current index should perform frequent low-level search cancellation checks. + * + * Enabling low-level checks will make long-running searches react to cancellation requests faster. However, + * since it produces more cancellation checks, it might slow search performance down. + */ + public abstract boolean lowLevelCancellation(); + public abstract SearchContext minimumScore(float minimumScore); public abstract Float minimumScore(); @@ -335,7 +316,9 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract void keepAlive(long keepAlive); - public abstract SearchLookup lookup(); + public SearchLookup lookup() { + return getQueryShardContext().lookup(); + } public abstract DfsSearchResult dfsResult(); @@ -429,7 +412,11 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas result.append("searchType=[").append(searchType()).append("]"); } if (scrollContext() != null) { - result.append("scroll=[").append(scrollContext().scroll.keepAlive()).append("]"); + if (scrollContext().scroll != null) { + result.append("scroll=[").append(scrollContext().scroll.keepAlive()).append("]"); + } else { + result.append("scroll=[null]"); + } } result.append(" query=[").append(query()).append("]"); return result.toString(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0d6148011ed..0fe10fa71cd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -62,7 +63,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private SearchType searchType; private Scroll scroll; private String[] types = Strings.EMPTY_ARRAY; - private String[] filteringAliases; + private AliasFilter aliasFilter; private 
SearchSourceBuilder source; private Boolean requestCache; private long nowInMillis; @@ -73,29 +74,29 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, - String[] filteringAliases, long nowInMillis) { + AliasFilter aliasFilter, long nowInMillis) { this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.requestCache()); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter); this.scroll = searchRequest.scroll(); - this.filteringAliases = filteringAliases; this.nowInMillis = nowInMillis; } - public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) { + public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, AliasFilter aliasFilter) { this.types = types; this.nowInMillis = nowInMillis; - this.filteringAliases = filteringAliases; + this.aliasFilter = aliasFilter; this.shardId = shardId; } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, - Boolean requestCache) { + Boolean requestCache, AliasFilter aliasFilter) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; this.types = types; this.requestCache = requestCache; + this.aliasFilter = aliasFilter; } @@ -130,8 +131,8 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } @Override - public String[] filteringAliases() { - return filteringAliases; + public QueryBuilder filteringAliases() { + return aliasFilter.getQueryBuilder(); } @Override @@ -166,7 +167,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); - filteringAliases = in.readStringArray(); + aliasFilter = new AliasFilter(in); nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); } @@ -180,7 +181,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); out.writeStringArray(types); - out.writeStringArrayNullable(filteringAliases); + aliasFilter.writeTo(out); if (!asKey) { out.writeVLong(nowInMillis); } @@ -200,6 +201,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public void rewrite(QueryShardContext context) throws IOException { SearchSourceBuilder source = this.source; SearchSourceBuilder rewritten = null; + aliasFilter = aliasFilter.rewrite(context); while (rewritten != source) { rewritten = source.rewrite(context); source = rewritten; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 6c237322f04..01852506cdc 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -20,13 +20,26 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; +import 
org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.AliasFilterParsingException; +import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; +import java.util.Optional; +import java.util.function.Function; /** * Shard level request that represents a search. @@ -47,7 +60,7 @@ public interface ShardSearchRequest { SearchType searchType(); - String[] filteringAliases(); + QueryBuilder filteringAliases(); long nowInMillis(); @@ -76,4 +89,64 @@ public interface ShardSearchRequest { * QueryBuilder. */ void rewrite(QueryShardContext context) throws IOException; + + /** + * Returns the filter associated with listed filtering aliases. + *
+ * The list of filtering aliases should be obtained by calling MetaData.filteringAliases. + * Returns null if no filtering is required.
    + */ + static QueryBuilder parseAliasFilter(Function contextFactory, + IndexMetaData metaData, String... aliasNames) { + if (aliasNames == null || aliasNames.length == 0) { + return null; + } + Index index = metaData.getIndex(); + ImmutableOpenMap aliases = metaData.getAliases(); + Function parserFunction = (alias) -> { + if (alias.filter() == null) { + return null; + } + try { + byte[] filterSource = alias.filter().uncompressed(); + try (XContentParser parser = XContentFactory.xContent(filterSource).createParser(filterSource)) { + Optional innerQueryBuilder = contextFactory.apply(parser).parseInnerQueryBuilder(); + if (innerQueryBuilder.isPresent()) { + return innerQueryBuilder.get(); + } + return null; + } + } catch (IOException ex) { + throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex); + } + }; + if (aliasNames.length == 1) { + AliasMetaData alias = aliases.get(aliasNames[0]); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasNames[0], "Unknown alias name was passed to alias Filter"); + } + return parserFunction.apply(alias); + } else { + // we need to bench here a bit, to see maybe it makes sense to use OrFilter + BoolQueryBuilder combined = new BoolQueryBuilder(); + for (String aliasName : aliasNames) { + AliasMetaData alias = aliases.get(aliasName); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasNames[0], + "Unknown alias name was passed to alias Filter"); + } + QueryBuilder parsedFilter = parserFunction.apply(alias); + if (parsedFilter != null) { + combined.should(parsedFilter); + } else { + // The filter might be null only if filter was removed after filteringAliases was called + return null; + } + } + return combined; + } + } + } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 93013b94b36..c443f1ab688 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -22,16 +22,20 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; @@ -51,8 +55,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } public ShardSearchTransportRequest(SearchRequest searchRequest, ShardRouting shardRouting, int 
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 93013b94b36..c443f1ab688 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -22,16 +22,20 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; @@ -51,8 +55,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } public ShardSearchTransportRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, - String[] filteringAliases, long nowInMillis) { - this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, filteringAliases, nowInMillis); + AliasFilter aliasFilter, long nowInMillis) { + this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, aliasFilter, nowInMillis); this.originalIndices = new OriginalIndices(searchRequest); } @@ -104,7 +108,7 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } @Override - public String[] filteringAliases() { + public QueryBuilder filteringAliases() { return shardSearchLocalRequest.filteringAliases(); } @@ -157,4 +161,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha public void rewrite(QueryShardContext context) throws IOException { shardSearchLocalRequest.rewrite(context); } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new SearchTask(id, type, action, getDescription(), parentTaskId); + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 2eb2d34dd2c..f9b6aeb482a 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -342,16 +342,6 @@ public class SubSearchContext extends FilteredSearchContext { return fetchSearchResult; } - private SearchLookup searchLookup; - - @Override - public SearchLookup lookup() { - if (searchLookup == null) { - searchLookup = new SearchLookup(mapperService(), fieldData(), request().types()); - } - return searchLookup; - } - @Override public Counter timeEstimateCounter() { throw new UnsupportedOperationException("Not supported"); diff --git a/core/src/main/java/org/elasticsearch/search/lookup/DocLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/DocLookup.java index 3eea11a9f5c..584abdbc1d3 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/DocLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/DocLookup.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; -/** - * - */ public class DocLookup { private final MapperService mapperService; diff --git a/core/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java index 249a23b9bfc..8224e453ff2 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/FieldLookup.java @@ -24,9 +24,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -/** - * - */ public class FieldLookup { // we can cache fieldType completely per name, since it's on an index/shard level (the lookup does not change within the scope of a search request) diff --git a/core/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java index 7cd5093ba59..feefb1fcb30 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/FieldsLookup.java @@ -22,9 +22,6 @@ import org.apache.lucene.index.LeafReaderContext; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.MapperService; -/** - * - */ public class FieldsLookup { private final MapperService mapperService; diff --git a/core/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java index db20a03f825..f7776f299a6 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java @@ -33,9 +33,6 @@ import java.util.HashMap; import java.util.Map; import java.util.Set; -/** - * - */ public class LeafDocLookup implements Map { private final Map localCacheFieldData = new HashMap<>(4); diff --git a/core/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java index a5f90aa2c90..374fe7189a8 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java @@ -34,9 +34,6 @@ import java.util.Set; import static java.util.Collections.singletonMap; -/** - * - */ public class LeafFieldsLookup implements Map { private final MapperService mapperService; diff --git a/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java index c9438fd7108..aaa2baf62ee 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; -/** - * - */ public class SearchLookup { final DocLookup docMap; diff --git a/core/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java index 910f5daf7a3..4cc44747d0f 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/SourceLookup.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fieldvisitor.FieldsVisitor; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.util.Collection; import java.util.List; @@ -35,9 +36,6 @@ import java.util.Set; import static java.util.Collections.emptyMap; -/** - * - */ public class SourceLookup implements Map { private LeafReader reader; @@ -130,8 +128,8 @@ public class SourceLookup implements Map { return XContentMapValues.extractRawValues(path, loadSourceIfNeeded()); } - public Object filter(String[] includes, String[] excludes) { - return XContentMapValues.filter(loadSourceIfNeeded(), includes, excludes); + public Object filter(FetchSourceContext context) { + return context.getFilter().apply(loadSourceIfNeeded()); } public Object extractValue(String path) { diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java index c517c8730e4..6b4d7c0e842 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java +++ 
b/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java @@ -45,6 +45,7 @@ public class CollectorResult implements ToXContent, Writeable { public static final String REASON_SEARCH_MIN_SCORE = "search_min_score"; public static final String REASON_SEARCH_MULTI = "search_multi"; public static final String REASON_SEARCH_TIMEOUT = "search_timeout"; + public static final String REASON_SEARCH_CANCELLED = "search_cancelled"; public static final String REASON_AGGREGATION = "aggregation"; public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global"; diff --git a/core/src/main/java/org/elasticsearch/search/query/CancellableCollector.java b/core/src/main/java/org/elasticsearch/search/query/CancellableCollector.java new file mode 100644 index 00000000000..1c702ac0e1f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/query/CancellableCollector.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.query; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.LeafCollector; +import org.elasticsearch.common.inject.Provider; +import org.elasticsearch.tasks.TaskCancelledException; + +import java.io.IOException; + +/** + * Collector that checks whether the task it is executed under has been cancelled. + */ +public class CancellableCollector extends FilterCollector { + private final Provider<Boolean> cancelled; + private final boolean leafLevel; + + /** + * Constructor. + * @param cancelled supplier of the cancellation flag; the supplier is called once per segment if lowLevelCancellation is set + * to false, and once per collected document if it is set to true. In other words, this class assumes + * that the supplier is fast, with performance on the order of a volatile read.
+ * @param lowLevelCancellation true if the collector should check for cancellation for each collected document, false if the check should be + * performed only once per segment + * @param in wrapped collector + */ + public CancellableCollector(Provider<Boolean> cancelled, boolean lowLevelCancellation, Collector in) { + super(in); + this.cancelled = cancelled; + this.leafLevel = lowLevelCancellation; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + if (cancelled.get()) { + throw new TaskCancelledException("cancelled"); + } + if (leafLevel) { + return new CancellableLeafCollector(super.getLeafCollector(context)); + } else { + return super.getLeafCollector(context); + } + } + + private class CancellableLeafCollector extends FilterLeafCollector { + private CancellableLeafCollector(LeafCollector in) { + super(in); + } + + @Override + public void collect(int doc) throws IOException { + if (cancelled.get()) { + throw new TaskCancelledException("cancelled"); + } + super.collect(doc); + } + } +}
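To see the new collector outside a full search request, here is a self-contained sketch that drives a CancellableCollector from a plain Lucene searcher, with an AtomicBoolean standing in for the task's cancellation flag (in the real code path the flag comes from SearchTask#isCancelled, as wired up in QueryPhase below):

-------------------------------------
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.store.RAMDirectory;
import org.elasticsearch.search.query.CancellableCollector;

public class CancellableCollectorSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            writer.addDocument(new Document());
        }
        AtomicBoolean cancelled = new AtomicBoolean(false); // stand-in for the task's flag
        TotalHitCountCollector counter = new TotalHitCountCollector();
        // lowLevelCancellation = true checks the flag for every collected doc;
        // false checks once per segment, which is cheaper but reacts more slowly.
        CancellableCollector collector = new CancellableCollector(cancelled::get, true, counter);
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            new IndexSearcher(reader).search(new MatchAllDocsQuery(), collector);
        }
        // Flipping the flag mid-search would surface as a TaskCancelledException.
        System.out.println("collected " + counter.getTotalHits() + " docs");
    }
}
-------------------------------------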
diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index d1e90b2e9a5..5579e55826e 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -362,6 +362,15 @@ public class QueryPhase implements SearchPhase { } } + if (collector != null) { + final Collector child = collector; + collector = new CancellableCollector(searchContext.getTask()::isCancelled, searchContext.lowLevelCancellation(), collector); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_CANCELLED, + Collections.singletonList((InternalProfileCollector) child)); + } + } + try { if (collector != null) { if (doProfile) { diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java index c90f1247e77..94d259ef525 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhaseExecutionException.java @@ -25,9 +25,6 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -/** - * - */ public class QueryPhaseExecutionException extends SearchContextException { public QueryPhaseExecutionException(SearchContext context, String msg, Throwable cause) { diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java index 15593abf0da..012d96262fc 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java @@ -22,19 +22,19 @@ package org.elasticsearch.search.query; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import static org.elasticsearch.search.dfs.AggregatedDfs.readAggregatedDfs; -/** - * - */ public class QuerySearchRequest extends TransportRequest implements IndicesRequest { private long id; @@ -85,4 +85,9 @@ public class QuerySearchRequest extends TransportRequest implements IndicesReque dfs.writeTo(out); OriginalIndices.writeOriginalIndices(originalIndices, out); } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new SearchTask(id, type, action, getDescription(), parentTaskId); + } } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java index 1ae3157fa53..cfc5ac6add6 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java @@ -22,9 +22,6 @@ package org.elasticsearch.search.query; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.transport.TransportResponse; -/** - * - */ public abstract class QuerySearchResultProvider extends TransportResponse implements SearchPhaseResult { /** diff --git a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index bcdd94adf89..9137a72acb5 100644 --- a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -28,9 +28,6 @@ import java.io.IOException; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; -/** - * - */ public class ScrollQuerySearchResult extends TransportResponse { private QuerySearchResult queryResult; diff --git a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 2a57e41cfaf..fe1b0577aa7 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -24,10 +24,6 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; @@ -159,6 +155,8 @@ public final class QueryRescorer implements Rescorer { // incoming first pass hits, instead of allowing rescoring of just the top subset: Arrays.sort(in.scoreDocs, SCORE_DOC_COMPARATOR); } + // update the max score after the resort + in.setMaxScore(in.scoreDocs[0].score); return in; } diff --git a/core/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java b/core/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java index 6e3722f00dc..aa3c66b2fd8 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/RescoreSearchContext.java @@ -21,8 +21,6 @@ package org.elasticsearch.search.rescore; -/** - */ public class RescoreSearchContext { private int 
windowSize; diff --git a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java index dce83bea7e6..b9a6ca9be57 100644 --- a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java @@ -43,9 +43,6 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; -/** - * - */ public class SearchAfterBuilder implements ToXContent, Writeable { public static final ParseField SEARCH_AFTER = new ParseField("search_after"); private static final Object[] EMPTY_SORT_VALUES = new Object[0]; diff --git a/core/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/core/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 905ac8991bf..98dbadda247 100644 --- a/core/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.slice; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.ParseField; @@ -27,7 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.IndexFieldData; diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index d519f740870..e1585d708cd 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -21,11 +21,11 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexNumericFieldData; @@ -39,17 +39,13 @@ import org.elasticsearch.search.MultiValueMode; import java.io.IOException; import java.util.Objects; -import java.util.Optional; /** * A sort builder to sort based on a document field. 
*/ public class FieldSortBuilder extends SortBuilder { public static final String NAME = "field_sort"; - public static final ParseField NESTED_PATH = new ParseField("nested_path"); - public static final ParseField NESTED_FILTER = new ParseField("nested_filter"); public static final ParseField MISSING = new ParseField("missing"); - public static final ParseField ORDER = new ParseField("order"); public static final ParseField SORT_MODE = new ParseField("mode"); public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type"); @@ -239,10 +235,10 @@ public class FieldSortBuilder extends SortBuilder { builder.field(SORT_MODE.getPreferredName(), sortMode); } if (nestedFilter != null) { - builder.field(NESTED_FILTER.getPreferredName(), nestedFilter, params); + builder.field(NESTED_FILTER_FIELD.getPreferredName(), nestedFilter, params); } if (nestedPath != null) { - builder.field(NESTED_PATH.getPreferredName(), nestedPath); + builder.field(NESTED_PATH_FIELD.getPreferredName(), nestedPath); } builder.endObject(); builder.endObject(); @@ -327,67 +323,17 @@ public class FieldSortBuilder extends SortBuilder { * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument */ public static FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException { - XContentParser parser = context.parser(); + return PARSER.parse(context.parser(), new FieldSortBuilder(fieldName), context); + } - Optional nestedFilter = Optional.empty(); - String nestedPath = null; - Object missing = null; - SortOrder order = null; - SortMode sortMode = null; - String unmappedType = null; + private static ObjectParser PARSER = new ObjectParser<>(NAME); - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (context.getParseFieldMatcher().match(currentFieldName, NESTED_FILTER)) { - nestedFilter = context.parseInnerQueryBuilder(); - } else { - throw new ParsingException(parser.getTokenLocation(), "Expected " + NESTED_FILTER.getPreferredName() + " element."); - } - } else if (token.isValue()) { - if (context.getParseFieldMatcher().match(currentFieldName, NESTED_PATH)) { - nestedPath = parser.text(); - } else if (context.getParseFieldMatcher().match(currentFieldName, MISSING)) { - missing = parser.objectText(); - } else if (context.getParseFieldMatcher().match(currentFieldName, ORDER)) { - String sortOrder = parser.text(); - if ("asc".equals(sortOrder)) { - order = SortOrder.ASC; - } else if ("desc".equals(sortOrder)) { - order = SortOrder.DESC; - } else { - throw new ParsingException(parser.getTokenLocation(), "Sort order [{}] not supported.", sortOrder); - } - } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_MODE)) { - sortMode = SortMode.fromString(parser.text()); - } else if (context.getParseFieldMatcher().match(currentFieldName, UNMAPPED_TYPE)) { - unmappedType = parser.text(); - } else { - throw new ParsingException(parser.getTokenLocation(), "Option [{}] not supported.", currentFieldName); - } - } - } - - FieldSortBuilder builder = new FieldSortBuilder(fieldName); - nestedFilter.ifPresent(builder::setNestedFilter); - if (nestedPath != null) { - builder.setNestedPath(nestedPath); - } - if (missing != null) { - builder.missing(missing); - } - if (order != null) { - 
builder.order(order); - } - if (sortMode != null) { - builder.sortMode(sortMode); - } - if (unmappedType != null) { - builder.unmappedType(unmappedType); - } - return builder; + static { + PARSER.declareField(FieldSortBuilder::missing, p -> p.objectText(), MISSING, ValueType.VALUE); + PARSER.declareString(FieldSortBuilder::setNestedPath, NESTED_PATH_FIELD); + PARSER.declareString(FieldSortBuilder::unmappedType, UNMAPPED_TYPE); + PARSER.declareString((b, v) -> b.order(SortOrder.fromString(v)), ORDER_FIELD); + PARSER.declareString((b, v) -> b.sortMode(SortMode.fromString(v)), SORT_MODE); + PARSER.declareObject(FieldSortBuilder::setNestedFilter, SortBuilder::parseNestedFilter, NESTED_FILTER_FIELD); } }
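The hand-rolled token-by-token loop above is replaced by a declarative ObjectParser: one declare* call per accepted field, with field matching and unknown-field errors handled by the parser itself. A condensed sketch of the same pattern on a made-up two-field object (names are hypothetical, and the exact ObjectParser generics and context types are assumptions that vary between versions of the codebase):

-------------------------------------
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

public class ObjectParserSketch {
    static class SampleBuilder {
        String name;
        int size;
    }

    private static final ObjectParser<SampleBuilder, ParseFieldMatcherSupplier> PARSER =
            new ObjectParser<>("sample");
    static {
        // Each declare* call replaces one branch of the old hand-written loop.
        PARSER.declareString((b, v) -> b.name = v, new ParseField("name"));
        PARSER.declareInt((b, v) -> b.size = v, new ParseField("size"));
    }

    public static void main(String[] args) throws Exception {
        String json = "{\"name\":\"foo\",\"size\":3}";
        try (XContentParser parser = XContentFactory.xContent(json).createParser(json)) {
            SampleBuilder b = PARSER.parse(parser, new SampleBuilder(), () -> ParseFieldMatcher.STRICT);
            System.out.println(b.name + "/" + b.size);
        }
    }
}
-------------------------------------

One behavioral consequence worth noting: unknown fields are now rejected by ObjectParser itself rather than by the per-builder ParsingException branches that the old loops carried.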
diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index f33dd0e2b15..d7b369ac257 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DocIdSetIterator; @@ -46,6 +47,7 @@ import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.fielddata.plain.AbstractLatLonPointDVIndexFieldData.LatLonPointDVIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.GeoValidationMethod; import org.elasticsearch.index.query.QueryBuilder; @@ -78,8 +80,6 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder> private static final ParseField COERCE_FIELD = new ParseField("coerce", "normalize") .withAllDeprecated("use validation_method instead"); private static final ParseField SORTMODE_FIELD = new ParseField("mode", "sort_mode"); - private static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); - private static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); private final String fieldName; private final List<GeoPoint> points = new ArrayList<>(); @@ -243,7 +243,7 @@ } /** - * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS} + * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#METERS} */ public GeoDistanceSortBuilder unit(DistanceUnit unit) { this.unit = unit; @@ -251,7 +251,7 @@ } /** - * Returns the distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS} + * Returns the distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#METERS} */ public DistanceUnit unit() { return this.unit; } @@ -450,7 +450,7 @@ geoDistance = GeoDistance.fromString(parser.text()); } else if (parseFieldMatcher.match(currentName, COERCE_FIELD)) { coerce = parser.booleanValue(); - if (coerce == true) { + if (coerce) { ignoreMalformed = true; } } else if (parseFieldMatcher.match(currentName, IGNORE_MALFORMED_FIELD)) { @@ -509,7 +509,7 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); // validation was not available prior to 2.x, so to support bwc percolation queries we only honor ignore_malformed on 2.x created indexes - List<GeoPoint> localPoints = new ArrayList<GeoPoint>(); + List<GeoPoint> localPoints = new ArrayList<>(); for (GeoPoint geoPoint : this.points) { localPoints.add(new GeoPoint(geoPoint)); } @@ -550,12 +550,23 @@ throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); } final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType); - final FixedSourceDistance[] distances = new FixedSourceDistance[localPoints.size()]; - for (int i = 0; i< localPoints.size(); i++) { - distances[i] = geoDistance.fixedSourceDistance(localPoints.get(i).lat(), localPoints.get(i).lon(), unit); + final Nested nested = resolveNested(context, nestedPath, nestedFilter); + + if (geoIndexFieldData.getClass() == LatLonPointDVIndexFieldData.class // only works with 5.x geo_point + && nested == null + && finalSortMode == MultiValueMode.MIN // LatLonDocValuesField internally picks the closest point + && unit == DistanceUnit.METERS + && reverse == false + && localPoints.size() == 1) { + return new SortFieldAndFormat( + LatLonDocValuesField.newDistanceSort(fieldName, localPoints.get(0).lat(), localPoints.get(0).lon()), + DocValueFormat.RAW); } - final Nested nested = resolveNested(context, nestedPath, nestedFilter); + final FixedSourceDistance[] distances = new FixedSourceDistance[localPoints.size()]; + for (int i = 0; i < localPoints.size(); i++) { + distances[i] = geoDistance.fixedSourceDistance(localPoints.get(i).lat(), localPoints.get(i).lon(), unit); + } IndexFieldData.XFieldComparatorSource geoDistanceComparatorSource = new IndexFieldData.XFieldComparatorSource() { @@ -573,11 +584,11 @@ final SortedNumericDoubleValues distanceValues = GeoDistance.distanceValues(geoPointValues, distances); final NumericDoubleValues selectedValues; if (nested == null) { - selectedValues = finalSortMode.select(distanceValues, Double.MAX_VALUE); + selectedValues = finalSortMode.select(distanceValues, Double.POSITIVE_INFINITY); } else { final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); - selectedValues = finalSortMode.select(distanceValues, Double.MAX_VALUE, rootDocs, innerDocs, + selectedValues = finalSortMode.select(distanceValues, Double.POSITIVE_INFINITY, rootDocs, innerDocs, context.reader().maxDoc()); } return selectedValues.getRawDoubleValues(); diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 5b9b139e495..52f25301783 100644 --- 
a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -20,13 +20,10 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; @@ -40,7 +37,6 @@ import java.util.Objects; public class ScoreSortBuilder extends SortBuilder { public static final String NAME = "_score"; - public static final ParseField ORDER_FIELD = new ParseField("order"); private static final SortFieldAndFormat SORT_SCORE = new SortFieldAndFormat( new SortField(null, SortField.Type.SCORE), DocValueFormat.RAW); private static final SortFieldAndFormat SORT_SCORE_REVERSE = new SortFieldAndFormat( @@ -86,26 +82,13 @@ public class ScoreSortBuilder extends SortBuilder { * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument */ public static ScoreSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException { - XContentParser parser = context.parser(); - ParseFieldMatcher matcher = context.getParseFieldMatcher(); + return PARSER.apply(context.parser(), context); + } - XContentParser.Token token; - String currentName = parser.currentName(); - ScoreSortBuilder result = new ScoreSortBuilder(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentName = parser.currentName(); - } else if (token.isValue()) { - if (matcher.match(currentName, ORDER_FIELD)) { - result.order(SortOrder.fromString(parser.text())); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); - } - } - return result; + private static ObjectParser PARSER = new ObjectParser<>(NAME, ScoreSortBuilder::new); + + static { + PARSER.declareString((builder, order) -> builder.order(SortOrder.fromString(order)), ORDER_FIELD); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 0a7cb5e1b36..6da93b26b8c 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -26,13 +26,12 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import 
org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -47,19 +46,16 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; import java.util.Locale; -import java.util.Map; import java.util.Objects; -import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * Script sort builder allows to sort based on a custom script expression. @@ -70,8 +66,6 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> { public static final ParseField TYPE_FIELD = new ParseField("type"); public static final ParseField SCRIPT_FIELD = new ParseField("script"); public static final ParseField SORTMODE_FIELD = new ParseField("mode"); - public static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); - public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); private final Script script; @@ -218,6 +212,18 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> { return builder; } + private static ConstructingObjectParser<ScriptSortBuilder, QueryParseContext> PARSER = new ConstructingObjectParser<>(NAME, + a -> new ScriptSortBuilder((Script) a[0], (ScriptSortType) a[1])); + + static { + PARSER.declareField(constructorArg(), Script::parse, Script.SCRIPT_PARSE_FIELD, ValueType.OBJECT_OR_STRING); + PARSER.declareField(constructorArg(), p -> ScriptSortType.fromString(p.text()), TYPE_FIELD, ValueType.STRING); + PARSER.declareString((b, v) -> b.order(SortOrder.fromString(v)), ORDER_FIELD); + PARSER.declareString((b, v) -> b.sortMode(SortMode.fromString(v)), SORTMODE_FIELD); + PARSER.declareString(ScriptSortBuilder::setNestedPath, NESTED_PATH_FIELD); + PARSER.declareObject(ScriptSortBuilder::setNestedFilter, SortBuilder::parseNestedFilter, NESTED_FILTER_FIELD); + } + /** * Creates a new {@link ScriptSortBuilder} from the query held by the {@link QueryParseContext} in * {@link org.elasticsearch.common.xcontent.XContent} format. @@ -228,66 +234,13 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> { * in '{ "foo": { "order" : "asc"} }'. 
When parsing the inner object, the field name can be passed in via this argument */ public static ScriptSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { - XContentParser parser = context.parser(); - ParseFieldMatcher parseField = context.getParseFieldMatcher(); - Script script = null; - ScriptSortType type = null; - SortMode sortMode = null; - SortOrder order = null; - Optional nestedFilter = Optional.empty(); - String nestedPath = null; - - XContentParser.Token token; - String currentName = parser.currentName(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (parseField.match(currentName, ScriptField.SCRIPT)) { - script = Script.parse(parser, parseField, context.getDefaultScriptLanguage()); - } else if (parseField.match(currentName, NESTED_FILTER_FIELD)) { - nestedFilter = context.parseInnerQueryBuilder(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); - } - } else if (token.isValue()) { - if (parseField.match(currentName, ORDER_FIELD)) { - order = SortOrder.fromString(parser.text()); - } else if (parseField.match(currentName, TYPE_FIELD)) { - type = ScriptSortType.fromString(parser.text()); - } else if (parseField.match(currentName, SORTMODE_FIELD)) { - sortMode = SortMode.fromString(parser.text()); - } else if (parseField.match(currentName, NESTED_PATH_FIELD)) { - nestedPath = parser.text(); - } else if (parseField.match(currentName, ScriptField.SCRIPT)) { - script = Script.parse(parser, parseField, context.getDefaultScriptLanguage()); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); - } - } - - ScriptSortBuilder result = new ScriptSortBuilder(script, type); - if (order != null) { - result.order(order); - } - if (sortMode != null) { - result.sortMode(sortMode); - } - nestedFilter.ifPresent(result::setNestedFilter); - if (nestedPath != null) { - result.setNestedPath(nestedPath); - } - return result; + return PARSER.apply(context.parser(), context); } @Override public SortFieldAndFormat build(QueryShardContext context) throws IOException { - final SearchScript searchScript = context.getScriptService().search( - context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); + final SearchScript searchScript = context.getSearchScript(script, ScriptContext.Standard.SEARCH); MultiValueMode valueMode = null; if (sortMode != null) { diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index c8f15f3a1e8..365c1f9fe4c 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitSetProducer; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.lucene.search.Queries; import 
org.elasticsearch.common.xcontent.XContentParser; @@ -46,13 +47,14 @@ import java.util.Optional; import static java.util.Collections.unmodifiableMap; -/** - * - */ public abstract class SortBuilder<T extends SortBuilder<T>> extends ToXContentToBytes implements NamedWriteable { protected SortOrder order = SortOrder.ASC; + + // parse fields common to more than one SortBuilder public static final ParseField ORDER_FIELD = new ParseField("order"); + public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); + public static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); private static final Map<String, Parser<?>> PARSERS; static { @@ -199,6 +201,16 @@ public abstract class SortBuilder<T extends SortBuilder<T>> extends ToXContentTo return nested; } + protected static QueryBuilder parseNestedFilter(XContentParser parser, QueryParseContext context) { + try { + QueryBuilder builder = context.parseInnerQueryBuilder().orElseThrow(() -> new ParsingException(parser.getTokenLocation(), + "Expected " + NESTED_FILTER_FIELD.getPreferredName() + " element.")); + return builder; + } catch (Exception e) { + throw new ParsingException(parser.getTokenLocation(), "Expected " + NESTED_FILTER_FIELD.getPreferredName() + " element.", e); + } + } + @FunctionalInterface private interface Parser<T extends SortBuilder<T>> { T fromXContent(QueryParseContext context, String elementName) throws IOException; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java index ece5a9c58bf..165f70ba3c0 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java @@ -24,8 +24,7 @@ import org.elasticsearch.index.query.QueryShardContext; import java.util.LinkedHashMap; import java.util.Map; -/** - */ + public class SuggestionSearchContext { private final Map<String, SuggestionContext> suggestions = new LinkedHashMap<>(4); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java index c8ed562f9cc..efea5915766 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -/** - * - */ public class CompletionStats implements Streamable, ToXContent { private long sizeInBytes; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java index 84eb9de6a59..273aeb31717 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java @@ -32,9 +32,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -/** - * - */ public class CompletionSuggestionContext extends SuggestionSearchContext.SuggestionContext { protected CompletionSuggestionContext(QueryShardContext shardContext) { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionSuggestion.java b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionSuggestion.java index cf0939b85f9..50518ee0eff 100644 --- 
a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionSuggestion.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionSuggestion.java @@ -31,9 +31,6 @@ import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; import java.util.Map; -/** - * - */ public class CompletionSuggestion extends Suggest.Suggestion { public static final int TYPE = 2; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionTokenStream.java b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionTokenStream.java index 9cb9891add0..de81caa7e5d 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionTokenStream.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion2x/CompletionTokenStream.java @@ -35,9 +35,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.Set; -/** - * - */ public final class CompletionTokenStream extends TokenStream { private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index bf9158f9b87..4132247363a 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -29,10 +29,9 @@ import org.apache.lucene.search.spell.SuggestMode; import org.apache.lucene.util.automaton.LevenshteinAutomata; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.MapperService; @@ -41,10 +40,8 @@ import org.elasticsearch.search.suggest.SortBy; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.CandidateGenerator; import java.io.IOException; -import java.util.HashSet; import java.util.Locale; import java.util.Objects; -import java.util.Set; import java.util.function.Consumer; public final class DirectCandidateGeneratorBuilder implements CandidateGenerator { @@ -89,30 +86,6 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator this.field = field; } - /** - * Quasi copy-constructor that takes all values from the generator - * passed in, but uses different field name. Needed by parser because we - * need to buffer the field name but read all other properties to a - * temporary object. 
- */ - private static DirectCandidateGeneratorBuilder replaceField(String field, DirectCandidateGeneratorBuilder other) { - DirectCandidateGeneratorBuilder generator = new DirectCandidateGeneratorBuilder(field); - generator.preFilter = other.preFilter; - generator.postFilter = other.postFilter; - generator.suggestMode = other.suggestMode; - generator.accuracy = other.accuracy; - generator.size = other.size; - generator.sort = other.sort; - generator.stringDistance = other.stringDistance; - generator.maxEdits = other.maxEdits; - generator.maxInspections = other.maxInspections; - generator.maxTermFreq = other.maxTermFreq; - generator.prefixLength = other.prefixLength; - generator.minWordLength = other.minWordLength; - generator.minDocFreq = other.minDocFreq; - return generator; - } - /** * Read from a stream. */ @@ -151,6 +124,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator out.writeOptionalString(postFilter); } + String field() { + return this.field; + } + /** * The global suggest mode controls which suggested terms are included, * i.e. for which suggest text tokens terms should be suggested. @@ -169,6 +146,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + String suggestMode() { + return suggestMode; + } + /** * Sets the minimum similarity the suggested terms must have compared to * the original suggest text tokens. A value between 0 and 1 can be @@ -182,6 +163,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Float accuracy() { + return this.accuracy; + } + /** * Sets the maximum suggestions to be returned per suggest text term. */ @@ -193,6 +178,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Integer size() { + return size; + } + /** * Sets how to sort the suggest terms per suggest text token. Two * possible values: @@ -210,6 +199,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + String sort() { + return sort; + } + /** * Sets what string distance implementation to use for comparing how * similar suggested terms are. Four possible values can be specified: @@ -232,6 +225,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + String stringDistance() { + return stringDistance; + } + /** * Sets the maximum edit distance candidate suggestions can have in * order to be considered as a suggestion. Can only be a value between 1 @@ -246,6 +243,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Integer maxEdits() { + return maxEdits; + } + /** * A factor that is used to multiply with the size in order to inspect * more candidate suggestions. Can improve accuracy at the cost of @@ -256,6 +257,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Integer maxInspections() { + return maxInspections; + } + /** * Sets a maximum threshold in number of documents a suggest text token * can exist in order to be corrected. Can be a relative percentage @@ -272,6 +277,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Float maxTermFreq() { + return maxTermFreq; + } + /** * Sets the number of minimal prefix characters that must match in order * to be a candidate suggestion. Defaults to 1. 
Increasing this number @@ -283,6 +292,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Integer prefixLength() { + return prefixLength; + } + /** * The minimum length a suggest text term must have in order to be * corrected. Defaults to 4. @@ -292,6 +305,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Integer minWordLength() { + return minWordLength; + } + /** * Sets a minimal threshold in number of documents a suggested term * should appear in. This can be specified as an absolute number or as a @@ -305,6 +322,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + Float minDocFreq() { + return minDocFreq; + } + /** * Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator. * This filter is applied to the original token before candidates are generated. @@ -314,6 +335,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + String preFilter() { + return preFilter; + } + /** * Sets a filter (analyzer) that is applied to each of the generated tokens * before they are passed to the actual phrase scorer. @@ -323,6 +348,10 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator return this; } + String postFilter() { + return postFilter; + } + /** * gets the type identifier of this {@link CandidateGenerator} */ @@ -358,35 +387,28 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator } } - private static ObjectParser<Tuple<Set<String>, DirectCandidateGeneratorBuilder>, QueryParseContext> PARSER = new ObjectParser<>(TYPE); + private static ConstructingObjectParser<DirectCandidateGeneratorBuilder, QueryParseContext> PARSER = new ConstructingObjectParser<>( + TYPE, args -> new DirectCandidateGeneratorBuilder((String) args[0])); static { - PARSER.declareString((tp, s) -> tp.v1().add(s), FIELDNAME_FIELD); - PARSER.declareString((tp, s) -> tp.v2().preFilter(s), PREFILTER_FIELD); - PARSER.declareString((tp, s) -> tp.v2().postFilter(s), POSTFILTER_FIELD); - PARSER.declareString((tp, s) -> tp.v2().suggestMode(s), SUGGESTMODE_FIELD); - PARSER.declareFloat((tp, f) -> tp.v2().minDocFreq(f), MIN_DOC_FREQ_FIELD); - PARSER.declareFloat((tp, f) -> tp.v2().accuracy(f), ACCURACY_FIELD); - PARSER.declareInt((tp, i) -> tp.v2().size(i), SIZE_FIELD); - PARSER.declareString((tp, s) -> tp.v2().sort(s), SORT_FIELD); - PARSER.declareString((tp, s) -> tp.v2().stringDistance(s), STRING_DISTANCE_FIELD); - PARSER.declareInt((tp, i) -> tp.v2().maxInspections(i), MAX_INSPECTIONS_FIELD); - PARSER.declareFloat((tp, f) -> tp.v2().maxTermFreq(f), MAX_TERM_FREQ_FIELD); - PARSER.declareInt((tp, i) -> tp.v2().maxEdits(i), MAX_EDITS_FIELD); - PARSER.declareInt((tp, i) -> tp.v2().minWordLength(i), MIN_WORD_LENGTH_FIELD); - PARSER.declareInt((tp, i) -> tp.v2().prefixLength(i), PREFIX_LENGTH_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), FIELDNAME_FIELD); + PARSER.declareString(DirectCandidateGeneratorBuilder::preFilter, PREFILTER_FIELD); + PARSER.declareString(DirectCandidateGeneratorBuilder::postFilter, POSTFILTER_FIELD); + PARSER.declareString(DirectCandidateGeneratorBuilder::suggestMode, SUGGESTMODE_FIELD); + PARSER.declareFloat(DirectCandidateGeneratorBuilder::minDocFreq, MIN_DOC_FREQ_FIELD); + PARSER.declareFloat(DirectCandidateGeneratorBuilder::accuracy, ACCURACY_FIELD); + PARSER.declareInt(DirectCandidateGeneratorBuilder::size, SIZE_FIELD); + 
PARSER.declareString(DirectCandidateGeneratorBuilder::sort, SORT_FIELD); + PARSER.declareString(DirectCandidateGeneratorBuilder::stringDistance, STRING_DISTANCE_FIELD); + PARSER.declareInt(DirectCandidateGeneratorBuilder::maxInspections, MAX_INSPECTIONS_FIELD); + PARSER.declareFloat(DirectCandidateGeneratorBuilder::maxTermFreq, MAX_TERM_FREQ_FIELD); + PARSER.declareInt(DirectCandidateGeneratorBuilder::maxEdits, MAX_EDITS_FIELD); + PARSER.declareInt(DirectCandidateGeneratorBuilder::minWordLength, MIN_WORD_LENGTH_FIELD); + PARSER.declareInt(DirectCandidateGeneratorBuilder::prefixLength, PREFIX_LENGTH_FIELD); } public static DirectCandidateGeneratorBuilder fromXContent(QueryParseContext parseContext) throws IOException { - DirectCandidateGeneratorBuilder tempGenerator = new DirectCandidateGeneratorBuilder("_na_"); - // bucket for the field name, needed as constructor arg later - Set<String> tmpFieldName = new HashSet<>(1); - PARSER.parse(parseContext.parser(), new Tuple<>(tmpFieldName, tempGenerator), - parseContext); - if (tmpFieldName.size() != 1) { - throw new IllegalArgumentException("[" + TYPE + "] expects exactly one field parameter, but found " + tmpFieldName); - } - return replaceField(tmpFieldName.iterator().next(), tempGenerator); + return PARSER.apply(parseContext.parser(), parseContext); }
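ConstructingObjectParser is what removes the Tuple/replaceField workaround above: the required field name becomes constructor argument 0, and any properties that arrive in the document before it are buffered until the object can be constructed. A sketch of that buffering behavior on a made-up type (names are hypothetical; the exact generics and context types are assumptions for this era of the codebase):

-------------------------------------
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

public class ConstructingParserSketch {
    static class Generator {
        final String field;   // required; only known once "field" has been parsed
        Integer size;         // optional
        Generator(String field) { this.field = field; }
    }

    private static final ConstructingObjectParser<Generator, ParseFieldMatcherSupplier> PARSER =
            new ConstructingObjectParser<>("generator", args -> new Generator((String) args[0]));
    static {
        PARSER.declareString(constructorArg(), new ParseField("field"));
        PARSER.declareInt((g, v) -> g.size = v, new ParseField("size"));
    }

    public static void main(String[] args) throws Exception {
        // "size" precedes "field"; the parser buffers it until the constructor can run.
        String json = "{\"size\":5,\"field\":\"body\"}";
        try (XContentParser parser = XContentFactory.xContent(json).createParser(json)) {
            Generator g = PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
            System.out.println(g.field + "/" + g.size);
        }
    }
}
-------------------------------------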
b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -39,10 +39,10 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.suggest.SuggestionBuilder; import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; @@ -56,6 +56,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; +import java.util.function.Function; /** * Defines the actual suggest command for phrase suggestions ( phrase). @@ -392,7 +393,7 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder, ExecutableScript> compiledScript = context.getLazyExecutableScript(this.collateQuery, + ScriptContext.Standard.SEARCH); suggestionContext.setCollateQueryScript(compiledScript); if (this.collateParams != null) { suggestionContext.setCollateScriptParams(this.collateParams); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java index e2d7ff1c41d..960dca419f7 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; @@ -31,6 +32,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; class PhraseSuggestionContext extends SuggestionContext { static final boolean DEFAULT_COLLATE_PRUNE = false; @@ -52,7 +54,7 @@ class PhraseSuggestionContext extends SuggestionContext { private boolean requireUnigram = DEFAULT_REQUIRE_UNIGRAM; private BytesRef preTag; private BytesRef postTag; - private CompiledScript collateQueryScript; + private Function, ExecutableScript> collateQueryScript; private boolean prune = DEFAULT_COLLATE_PRUNE; private List generators = new ArrayList<>(); private Map collateScriptParams = new HashMap<>(1); @@ -192,11 +194,11 @@ class PhraseSuggestionContext extends SuggestionContext { return postTag; } - CompiledScript getCollateQueryScript() { + Function, ExecutableScript> getCollateQueryScript() { return collateQueryScript; } - void setCollateQueryScript(CompiledScript collateQueryScript) { + void setCollateQueryScript( Function, ExecutableScript> collateQueryScript) { this.collateQueryScript = collateQueryScript; } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 5e30a3b52b3..ed6317fcb30 
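Alongside this change, the collate query is no longer pre-compiled into a CompiledScript and run through ScriptService at suggest time; the suggestion context now carries a Function<Map<String, Object>, ExecutableScript> that binds parameters on demand. The getLazyExecutableScript internals are not shown in this diff, so the following is only an illustrative sketch of how such a factory could be assembled from a compile/executable pair (the class and method names are ours):

import java.util.Collections;
import java.util.Map;
import java.util.function.Function;

import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;

final class LazyScriptFactory {
    static Function<Map<String, Object>, ExecutableScript> lazyScript(
            ScriptService scriptService, Script script, ScriptContext context) {
        // Compile once, up front, while the shard context is still available...
        CompiledScript compiled = scriptService.compile(script, context, Collections.emptyMap());
        // ...and defer parameter binding until each suggestion candidate is collated.
        return params -> scriptService.executable(compiled, params);
    }
}

The PhraseSuggester then only calls collateScript.apply(vars) per correction, as the hunk above shows, and no longer needs a ScriptService reference of its own.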
100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; @@ -30,6 +31,9 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.RestoreInProgress.ShardRestoreStatus; @@ -41,41 +45,32 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; +import org.elasticsearch.cluster.routing.RoutingChangesObserver; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.EmptyTransportResponseHandler; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; -import 
java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -83,12 +78,10 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CopyOnWriteArrayList; +import java.util.stream.Collectors; import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; @@ -117,14 +110,12 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * method, which detects that shard should be restored from snapshot rather than recovered from gateway by looking * at the {@link ShardRouting#recoverySource()} property. *

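In sketch form, the per-shard detection this javadoc describes comes down to inspecting the routing entry's recovery source (the helper name is ours; the recoverySource()/RecoverySource.Type calls are the ones this patch uses throughout):

import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;

final class RestoreSourceCheck {
    // True when the routing table says this shard must be recovered from a snapshot.
    static boolean restoredFromSnapshot(ShardRouting shard) {
        return shard.recoverySource() != null
                && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
    }
}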
    - * At the end of the successful restore process {@code IndexShardSnapshotAndRestoreService} calls {@link #indexShardRestoreCompleted(Snapshot, ShardId)}, - * which updates {@link RestoreInProgress} in cluster state or removes it when all shards are completed. In case of + * At the end of the successful restore process {@code RestoreService} calls {@link #cleanupRestoreState(ClusterChangedEvent)}, + * which removes {@link RestoreInProgress} when all shards are completed. In case of * restore failure a normal recovery fail-over process kicks in. */ public class RestoreService extends AbstractComponent implements ClusterStateListener { - public static final String UPDATE_RESTORE_ACTION_NAME = "internal:cluster/snapshot/update_restore"; - private static final Set UNMODIFIABLE_SETTINGS = unmodifiableSet(newHashSet( SETTING_NUMBER_OF_SHARDS, SETTING_VERSION_CREATED, @@ -148,33 +139,29 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private final RepositoriesService repositoriesService; - private final TransportService transportService; - private final AllocationService allocationService; private final MetaDataCreateIndexService createIndexService; private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; - private final CopyOnWriteArrayList> listeners = new CopyOnWriteArrayList<>(); - - private final BlockingQueue updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue(); private final ClusterSettings clusterSettings; + private final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor; + @Inject - public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService, + public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, AllocationService allocationService, MetaDataCreateIndexService createIndexService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { super(settings); this.clusterService = clusterService; this.repositoriesService = repositoriesService; - this.transportService = transportService; this.allocationService = allocationService; this.createIndexService = createIndexService; this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; - transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); this.clusterSettings = clusterSettings; + this.cleanRestoreStateTaskExecutor = new CleanRestoreStateTaskExecutor(logger); } /** @@ -183,7 +170,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis * @param request restore request * @param listener restore listener */ - public void restoreSnapshot(final RestoreRequest request, final ActionListener listener) { + public void restoreSnapshot(final RestoreRequest request, final ActionListener listener) { try { // Read snapshot info and metadata from the repository Repository repository = repositoriesService.repository(request.repositoryName); @@ -314,7 +301,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } shards = shardsBuilder.build(); - RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, RestoreInProgress.State.INIT, Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards); + RestoreInProgress.Entry restoreEntry = new 
RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards); builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry)); } else { shards = ImmutableOpenMap.of(); @@ -469,7 +456,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(restoreInfo); + listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo)); } }); @@ -480,19 +467,33 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } } - /** - * This method is used by {@link IndexShard} to notify - * {@code RestoreService} about shard restore completion. - * - * @param snapshot snapshot - * @param shardId shard id - */ - public void indexShardRestoreCompleted(Snapshot snapshot, ShardId shardId) { - logger.trace("[{}] successfully restored shard [{}]", snapshot, shardId); - UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshot, shardId, - new ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.SUCCESS)); - transportService.sendRequest(clusterService.state().nodes().getMasterNode(), - UPDATE_RESTORE_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); + public static RestoreInProgress updateRestoreStateWithDeletedIndices(RestoreInProgress oldRestore, Set deletedIndices) { + boolean changesMade = false; + final List entries = new ArrayList<>(); + for (RestoreInProgress.Entry entry : oldRestore.entries()) { + ImmutableOpenMap.Builder shardsBuilder = null; + for (ObjectObjectCursor cursor : entry.shards()) { + ShardId shardId = cursor.key; + if (deletedIndices.contains(shardId.getIndex())) { + changesMade = true; + if (shardsBuilder == null) { + shardsBuilder = ImmutableOpenMap.builder(entry.shards()); + } + shardsBuilder.put(shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted")); + } + } + if (shardsBuilder != null) { + ImmutableOpenMap shards = shardsBuilder.build(); + entries.add(new RestoreInProgress.Entry(entry.snapshot(), overallState(RestoreInProgress.State.STARTED, shards), entry.indices(), shards)); + } else { + entries.add(entry); + } + } + if (changesMade) { + return new RestoreInProgress(entries.toArray(new RestoreInProgress.Entry[entries.size()])); + } else { + return oldRestore; + } } public static final class RestoreCompletionResponse { @@ -513,168 +514,201 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } } - /** - * Updates shard restore record in the cluster state. 
-     *
-     * @param request update shard status request
-     */
-    private void updateRestoreStateOnMaster(final UpdateIndexShardRestoreStatusRequest request) {
-        logger.trace("received updated snapshot restore state [{}]", request);
-        updatedSnapshotStateQueue.add(request);
+    public static class RestoreInProgressUpdater extends RoutingChangesObserver.AbstractRoutingChangesObserver {
+        private final Map<Snapshot, Updates> shardChanges = new HashMap<>();
 
-        clusterService.submitStateUpdateTask("update snapshot state", new ClusterStateUpdateTask() {
-            private final List<UpdateIndexShardRestoreStatusRequest> drainedRequests = new ArrayList<>();
-            private Map<Snapshot, Tuple<RestoreInfo, ImmutableOpenMap<ShardId, ShardRestoreStatus>>> batchedRestoreInfo = null;
-
-            @Override
-            public ClusterState execute(ClusterState currentState) {
-
-                if (request.processed) {
-                    return currentState;
+        @Override
+        public void shardStarted(ShardRouting initializingShard, ShardRouting startedShard) {
+            // mark snapshot as completed
+            if (initializingShard.primary()) {
+                RecoverySource recoverySource = initializingShard.recoverySource();
+                if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
+                    Snapshot snapshot = ((SnapshotRecoverySource) recoverySource).snapshot();
+                    changes(snapshot).startedShards.put(initializingShard.shardId(),
+                        new ShardRestoreStatus(initializingShard.currentNodeId(), RestoreInProgress.State.SUCCESS));
                 }
+            }
+        }
 
-                updatedSnapshotStateQueue.drainTo(drainedRequests);
-
-                final int batchSize = drainedRequests.size();
-
-                // nothing to process (a previous event has processed it already)
-                if (batchSize == 0) {
-                    return currentState;
+        @Override
+        public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
+            if (failedShard.primary() && failedShard.initializing()) {
+                RecoverySource recoverySource = failedShard.recoverySource();
+                if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
+                    Snapshot snapshot = ((SnapshotRecoverySource) recoverySource).snapshot();
+                    // mark restore entry for this shard as failed when it's due to a file corruption. There is no need to wait on retries
+                    // to restore this shard on another node if the snapshot files are corrupt. In the case where a node just left or crashed,
+                    // however, we only want to acknowledge the restore operation once it has been successfully restored on another node.
+ if (unassignedInfo.getFailure() != null && Lucene.isCorruptionException(unassignedInfo.getFailure().getCause())) { + changes(snapshot).failedShards.put(failedShard.shardId(), new ShardRestoreStatus(failedShard.currentNodeId(), + RestoreInProgress.State.FAILURE, unassignedInfo.getFailure().getCause().getMessage())); + } } + } + } - final RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE); - if (restore != null) { - int changedCount = 0; - final List entries = new ArrayList<>(); - for (RestoreInProgress.Entry entry : restore.entries()) { - ImmutableOpenMap.Builder shardsBuilder = null; + @Override + public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) { + // if we force an empty primary, we should also fail the restore entry + if (unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT && + initializedShard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) { + Snapshot snapshot = ((SnapshotRecoverySource) unassignedShard.recoverySource()).snapshot(); + changes(snapshot).failedShards.put(unassignedShard.shardId(), new ShardRestoreStatus(null, + RestoreInProgress.State.FAILURE, "recovery source type changed from snapshot to " + initializedShard.recoverySource())); + } + } - for (int i = 0; i < batchSize; i++) { - final UpdateIndexShardRestoreStatusRequest updateSnapshotState = drainedRequests.get(i); - updateSnapshotState.processed = true; + /** + * Helper method that creates update entry for the given shard id if such an entry does not exist yet. + */ + private Updates changes(Snapshot snapshot) { + return shardChanges.computeIfAbsent(snapshot, k -> new Updates()); + } - if (entry.snapshot().equals(updateSnapshotState.snapshot())) { - logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), updateSnapshotState.status().state()); - if (shardsBuilder == null) { - shardsBuilder = ImmutableOpenMap.builder(entry.shards()); - } - shardsBuilder.put(updateSnapshotState.shardId(), updateSnapshotState.status()); - changedCount++; - } + private static class Updates { + private Map failedShards = new HashMap<>(); + private Map startedShards = new HashMap<>(); + } + + public RestoreInProgress applyChanges(RestoreInProgress oldRestore) { + if (shardChanges.isEmpty() == false) { + final List entries = new ArrayList<>(); + for (RestoreInProgress.Entry entry : oldRestore.entries()) { + Snapshot snapshot = entry.snapshot(); + Updates updates = shardChanges.get(snapshot); + assert Sets.haveEmptyIntersection(updates.startedShards.keySet(), updates.failedShards.keySet()); + if (updates.startedShards.isEmpty() == false || updates.failedShards.isEmpty() == false) { + ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(entry.shards()); + for (Map.Entry startedShardEntry : updates.startedShards.entrySet()) { + shardsBuilder.put(startedShardEntry.getKey(), startedShardEntry.getValue()); } - - if (shardsBuilder != null) { - ImmutableOpenMap shards = shardsBuilder.build(); - if (!completed(shards)) { - entries.add(new RestoreInProgress.Entry(entry.snapshot(), RestoreInProgress.State.STARTED, entry.indices(), shards)); - } else { - logger.info("restore [{}] is done", entry.snapshot()); - if (batchedRestoreInfo == null) { - batchedRestoreInfo = new HashMap<>(); - } - assert !batchedRestoreInfo.containsKey(entry.snapshot()); - batchedRestoreInfo.put(entry.snapshot(), - new Tuple<>( - new RestoreInfo(entry.snapshot().getSnapshotId().getName(), - 
entry.indices(), - shards.size(), - shards.size() - failedShards(shards)), - shards)); - } - } else { - entries.add(entry); + for (Map.Entry failedShardEntry : updates.failedShards.entrySet()) { + shardsBuilder.put(failedShardEntry.getKey(), failedShardEntry.getValue()); } - } - - if (changedCount > 0) { - logger.trace("changed cluster state triggered by {} snapshot restore state updates", changedCount); - - final RestoreInProgress updatedRestore = new RestoreInProgress(entries.toArray(new RestoreInProgress.Entry[entries.size()])); - return ClusterState.builder(currentState).putCustom(RestoreInProgress.TYPE, updatedRestore).build(); + ImmutableOpenMap shards = shardsBuilder.build(); + RestoreInProgress.State newState = overallState(RestoreInProgress.State.STARTED, shards); + entries.add(new RestoreInProgress.Entry(entry.snapshot(), newState, entry.indices(), shards)); + } else { + entries.add(entry); } } - return currentState; + return new RestoreInProgress(entries.toArray(new RestoreInProgress.Entry[entries.size()])); + } else { + return oldRestore; } + } - @Override - public void onFailure(String source, @Nullable Exception e) { - for (UpdateIndexShardRestoreStatusRequest request : drainedRequests) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]", request.snapshot(), request.shardId(), request.status()), e); - } - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (batchedRestoreInfo != null) { - for (final Entry>> entry : batchedRestoreInfo.entrySet()) { - final Snapshot snapshot = entry.getKey(); - final RestoreInfo restoreInfo = entry.getValue().v1(); - final ImmutableOpenMap shards = entry.getValue().v2(); - RoutingTable routingTable = newState.getRoutingTable(); - final List waitForStarted = new ArrayList<>(); - for (ObjectObjectCursor shard : shards) { - if (shard.value.state() == RestoreInProgress.State.SUCCESS ) { - ShardId shardId = shard.key; - ShardRouting shardRouting = findPrimaryShard(routingTable, shardId); - if (shardRouting != null && !shardRouting.active()) { - logger.trace("[{}][{}] waiting for the shard to start", snapshot, shardId); - waitForStarted.add(shardId); - } - } - } - if (waitForStarted.isEmpty()) { - notifyListeners(snapshot, restoreInfo); - } else { - clusterService.addLast(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - if (event.routingTableChanged()) { - RoutingTable routingTable = event.state().getRoutingTable(); - for (Iterator iterator = waitForStarted.iterator(); iterator.hasNext();) { - ShardId shardId = iterator.next(); - ShardRouting shardRouting = findPrimaryShard(routingTable, shardId); - // Shard disappeared (index deleted) or became active - if (shardRouting == null || shardRouting.active()) { - iterator.remove(); - logger.trace("[{}][{}] shard disappeared or started - removing", snapshot, shardId); - } - } - } - if (waitForStarted.isEmpty()) { - notifyListeners(snapshot, restoreInfo); - clusterService.remove(this); - } - } - }); - } - } - } - } - - private ShardRouting findPrimaryShard(RoutingTable routingTable, ShardId shardId) { - IndexRoutingTable indexRoutingTable = routingTable.index(shardId.getIndex()); - if (indexRoutingTable != null) { - IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId.id()); - if (indexShardRoutingTable != null) { - return indexShardRoutingTable.primaryShard(); - } - } - return null; - } - - private void 
notifyListeners(Snapshot snapshot, RestoreInfo restoreInfo) { - for (ActionListener listener : listeners) { - try { - listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo)); - } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to update snapshot status for [{}]", listener), e); - } - } - } - }); } - private boolean completed(ImmutableOpenMap shards) { + public static RestoreInProgress.Entry restoreInProgress(ClusterState state, Snapshot snapshot) { + final RestoreInProgress restoreInProgress = state.custom(RestoreInProgress.TYPE); + if (restoreInProgress != null) { + for (RestoreInProgress.Entry e : restoreInProgress.entries()) { + if (e.snapshot().equals(snapshot)) { + return e; + } + } + } + return null; + } + + static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { + + static class Task { + final Snapshot snapshot; + + Task(Snapshot snapshot) { + this.snapshot = snapshot; + } + + @Override + public String toString() { + return "clean restore state for restoring snapshot " + snapshot; + } + } + + private final Logger logger; + + public CleanRestoreStateTaskExecutor(Logger logger) { + this.logger = logger; + } + + @Override + public BatchResult execute(final ClusterState currentState, final List tasks) throws Exception { + final BatchResult.Builder resultBuilder = BatchResult.builder().successes(tasks); + Set completedSnapshots = tasks.stream().map(e -> e.snapshot).collect(Collectors.toSet()); + final List entries = new ArrayList<>(); + final RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); + boolean changed = false; + if (restoreInProgress != null) { + for (RestoreInProgress.Entry entry : restoreInProgress.entries()) { + if (completedSnapshots.contains(entry.snapshot()) == false) { + entries.add(entry); + } else { + changed = true; + } + } + } + if (changed == false) { + return resultBuilder.build(currentState); + } + RestoreInProgress updatedRestoreInProgress = new RestoreInProgress(entries.toArray(new RestoreInProgress.Entry[entries.size()])); + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(currentState.getCustoms()); + builder.put(RestoreInProgress.TYPE, updatedRestoreInProgress); + ImmutableOpenMap customs = builder.build(); + return resultBuilder.build(ClusterState.builder(currentState).customs(customs).build()); + } + + @Override + public void onFailure(final String source, final Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + } + + @Override + public void onNoLongerMaster(String source) { + logger.debug("no longer master while processing restore state update [{}]", source); + } + + } + + private void cleanupRestoreState(ClusterChangedEvent event) { + ClusterState state = event.state(); + + RestoreInProgress restoreInProgress = state.custom(RestoreInProgress.TYPE); + if (restoreInProgress != null) { + for (RestoreInProgress.Entry entry : restoreInProgress.entries()) { + if (entry.state().completed()) { + assert completed(entry.shards()) : "state says completed but restore entries are not"; + clusterService.submitStateUpdateTask( + "clean up snapshot restore state", + new CleanRestoreStateTaskExecutor.Task(entry.snapshot()), + ClusterStateTaskConfig.build(Priority.URGENT), + cleanRestoreStateTaskExecutor, + cleanRestoreStateTaskExecutor); + } + } + } + } + + public static RestoreInProgress.State overallState(RestoreInProgress.State nonCompletedState, + 
ImmutableOpenMap shards) { + boolean hasFailed = false; + for (ObjectCursor status : shards.values()) { + if (!status.value.state().completed()) { + return nonCompletedState; + } + if (status.value.state() == RestoreInProgress.State.FAILURE) { + hasFailed = true; + } + } + if (hasFailed) { + return RestoreInProgress.State.FAILURE; + } else { + return RestoreInProgress.State.SUCCESS; + } + } + + public static boolean completed(ImmutableOpenMap shards) { for (ObjectCursor status : shards.values()) { if (!status.value.state().completed()) { return false; @@ -683,7 +717,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis return true; } - private int failedShards(ImmutableOpenMap shards) { + public static int failedShards(ImmutableOpenMap shards) { int failedShards = 0; for (ObjectCursor status : shards.values()) { if (status.value.state() == RestoreInProgress.State.FAILURE) { @@ -727,53 +761,6 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } } - /** - * Checks if any of the deleted indices are still recovering and fails recovery on the shards of these indices - * - * @param event cluster changed event - */ - private void processDeletedIndices(ClusterChangedEvent event) { - RestoreInProgress restore = event.state().custom(RestoreInProgress.TYPE); - if (restore == null) { - // Not restoring - nothing to do - return; - } - - if (!event.indicesDeleted().isEmpty()) { - // Some indices were deleted, let's make sure all indices that we are restoring still exist - for (RestoreInProgress.Entry entry : restore.entries()) { - List shardsToFail = null; - for (ObjectObjectCursor shard : entry.shards()) { - if (!shard.value.state().completed()) { - if (!event.state().metaData().hasIndex(shard.key.getIndex().getName())) { - if (shardsToFail == null) { - shardsToFail = new ArrayList<>(); - } - shardsToFail.add(shard.key); - } - } - } - if (shardsToFail != null) { - for (ShardId shardId : shardsToFail) { - logger.trace("[{}] failing running shard restore [{}]", entry.snapshot(), shardId); - updateRestoreStateOnMaster(new UpdateIndexShardRestoreStatusRequest(entry.snapshot(), shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted"))); - } - } - } - } - } - - /** - * Fails the given snapshot restore operation for the given shard - */ - public void failRestore(Snapshot snapshot, ShardId shardId) { - logger.debug("[{}] failed to restore shard [{}]", snapshot, shardId); - UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshot, shardId, - new ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE)); - transportService.sendRequest(clusterService.state().nodes().getMasterNode(), - UPDATE_RESTORE_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); - } - private boolean failed(SnapshotInfo snapshot, String index) { for (SnapshotShardFailure failure : snapshot.shardFailures()) { if (index.equals(failure.index())) { @@ -810,34 +797,11 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } } - /** - * Adds restore completion listener - *

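The overallState() method added earlier in this file folds per-shard statuses into one entry-level state: any still-running shard keeps the entry in the caller-supplied non-completed state, and once everything has completed, a single failure outweighs any number of successes. Restated over plain Java to make the precedence explicit (a simplified model, not the method itself):

enum ShardState {
    STARTED, SUCCESS, FAILURE;
    boolean completed() { return this != STARTED; }
}

final class OverallState {
    static ShardState overallState(ShardState nonCompletedState, Iterable<ShardState> shards) {
        boolean hasFailed = false;
        for (ShardState s : shards) {
            if (s.completed() == false) {
                return nonCompletedState;  // at least one shard is still running
            }
            if (s == ShardState.FAILURE) {
                hasFailed = true;          // remember, but keep scanning for running shards
            }
        }
        return hasFailed ? ShardState.FAILURE : ShardState.SUCCESS;
    }
}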
    - * This listener is called for each snapshot that finishes restore operation in the cluster. It's responsibility of - * the listener to decide if it's called for the appropriate snapshot or not. - * - * @param listener restore completion listener - */ - public void addListener(ActionListener listener) { - this.listeners.add(listener); - } - - /** - * Removes restore completion listener - *

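CleanRestoreStateTaskExecutor, introduced earlier in this file, replaces the per-request queue drained above with batched cluster-state tasks: every snapshot queued for cleanup is folded into one state transition, and the current state object is returned untouched when nothing changed, so no redundant update is published. A toy model of that batching contract (plain Java sets standing in for RestoreInProgress; names ours):

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class RestoreCleanupModel {
    static Set<String> executeBatch(Set<String> restoresInProgress, List<String> completedSnapshots) {
        Set<String> remaining = new HashSet<>(restoresInProgress);
        remaining.removeAll(completedSnapshots);           // drop every snapshot named by a queued task
        return remaining.size() == restoresInProgress.size()
                ? restoresInProgress                       // unchanged: reuse the current state
                : Collections.unmodifiableSet(remaining);  // changed: publish a new state
    }
}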
    - * This listener is called for each snapshot that finishes restore operation in the cluster. - * - * @param listener restore completion listener - */ - public void removeListener(ActionListener listener) { - this.listeners.remove(listener); - } - @Override public void clusterChanged(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { - processDeletedIndices(event); + cleanupRestoreState(event); } } catch (Exception t) { logger.warn("Failed to update restore state ", t); @@ -1061,69 +1025,4 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } } - - /** - * Internal class that is used to send notifications about finished shard restore operations to master node - */ - public static class UpdateIndexShardRestoreStatusRequest extends TransportRequest { - private Snapshot snapshot; - private ShardId shardId; - private ShardRestoreStatus status; - - volatile boolean processed; // state field, no need to serialize - - public UpdateIndexShardRestoreStatusRequest() { - - } - - private UpdateIndexShardRestoreStatusRequest(Snapshot snapshot, ShardId shardId, ShardRestoreStatus status) { - this.snapshot = snapshot; - this.shardId = shardId; - this.status = status; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - snapshot = new Snapshot(in); - shardId = ShardId.readShardId(in); - status = ShardRestoreStatus.readShardRestoreStatus(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - snapshot.writeTo(out); - shardId.writeTo(out); - status.writeTo(out); - } - - public Snapshot snapshot() { - return snapshot; - } - - public ShardId shardId() { - return shardId; - } - - public ShardRestoreStatus status() { - return status; - } - - @Override - public String toString() { - return "" + snapshot + ", shardId [" + shardId + "], status [" + status.state() + "]"; - } - } - - /** - * Internal class that is used to send notifications about finished shard restore operations to master node - */ - class UpdateRestoreStateRequestHandler implements TransportRequestHandler { - @Override - public void messageReceived(UpdateIndexShardRestoreStatusRequest request, final TransportChannel channel) throws Exception { - updateRestoreStateOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 1f7a4ee4fd6..d939236732e 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -29,8 +29,10 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -42,11 +44,13 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import 
org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.index.engine.SnapshotFailedEngineException;
+import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus.Stage;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.Repository;
@@ -80,7 +84,7 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
  * This service runs on data and master nodes and controls currently snapshotted shards on these nodes. It is responsible for
  * starting and stopping shard level snapshots
  */
-public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener {
+public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener {
 
     public static final String UPDATE_SNAPSHOT_ACTION_NAME = "internal:cluster/snapshot/update_snapshot";
 
@@ -156,12 +160,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
             SnapshotsInProgress prev = event.previousState().custom(SnapshotsInProgress.TYPE);
             SnapshotsInProgress curr = event.state().custom(SnapshotsInProgress.TYPE);
 
-            if (prev == null) {
-                if (curr != null) {
-                    processIndexShardSnapshots(event);
-                }
-            } else if (prev.equals(curr) == false) {
-                processIndexShardSnapshots(event);
+            if ((prev == null && curr != null) || (prev != null && prev.equals(curr) == false)) {
+                processIndexShardSnapshots(event);
             }
             String masterNodeId = event.state().nodes().getMasterNodeId();
             if (masterNodeId != null && masterNodeId.equals(event.previousState().nodes().getMasterNodeId()) == false) {
@@ -173,6 +173,18 @@
         }
     }
 
+    @Override
+    public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
+        // abort any snapshots occurring on the soon-to-be closed shard
+        Map<Snapshot, SnapshotShards> snapshotShardsMap = shardSnapshots;
+        for (Map.Entry<Snapshot, SnapshotShards> snapshotShards : snapshotShardsMap.entrySet()) {
+            Map<ShardId, IndexShardSnapshotStatus> shards = snapshotShards.getValue().shards;
+            if (shards.containsKey(shardId)) {
+                logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", shardId, snapshotShards.getKey().getSnapshotId());
+                shards.get(shardId).abort();
+            }
+        }
+    }
 
     /**
      * Returns status of shards that are snapshotted on the node and belong to the given snapshot
@@ -205,6 +217,16 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
             final Snapshot snapshot = entry.getKey();
             if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) {
                 survivors.put(entry.getKey(), entry.getValue());
+            } else {
+                // abort any running snapshots of shards for the removed entry;
+                // this could happen if for some reason the cluster state update for aborting
+                // running shards is missed, then the snapshot is removed in a subsequent cluster
+                // state update, which is being processed here
+                for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().shards.values()) {
+                    if (snapshotStatus.stage() == Stage.INIT || snapshotStatus.stage() == Stage.STARTED) {
+                        snapshotStatus.abort();
+                    }
+                }
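Both new paths above converge on one defensive rule: when a shard goes away, whether because it is closing or because its snapshot entry vanished from the cluster state, any per-shard snapshot still in INIT or STARTED is aborted rather than left dangling. Reduced to its core (the helper name is ours; the Stage checks and abort() call are the ones used above):

import java.util.Map;

import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;

final class SnapshotAbortHelper {
    // Abort the in-flight snapshot status, if any, attached to a departing shard.
    static void abortInFlight(Map<ShardId, IndexShardSnapshotStatus> shards, ShardId departing) {
        IndexShardSnapshotStatus status = shards.get(departing);
        if (status != null && (status.stage() == IndexShardSnapshotStatus.Stage.INIT
                || status.stage() == IndexShardSnapshotStatus.Stage.STARTED)) {
            status.abort();
        }
    }
}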
} } @@ -221,7 +243,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements if (entry.state() == SnapshotsInProgress.State.STARTED) { Map startedShards = new HashMap<>(); SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot()); - for (ObjectObjectCursor shard : entry.shards()) { + for (ObjectObjectCursor shard : entry.shards()) { // Add all new shards to start processing on if (localNodeId.equals(shard.value.nodeId())) { if (shard.value.state() == SnapshotsInProgress.State.INIT && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.key))) { @@ -249,7 +271,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements // Abort all running shards for this snapshot SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot()); if (snapshotShards != null) { - for (ObjectObjectCursor shard : entry.shards()) { + for (ObjectObjectCursor shard : entry.shards()) { IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.key); if (snapshotStatus != null) { switch (snapshotStatus.stage()) { @@ -263,12 +285,12 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements case DONE: logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, updating status on the master", entry.snapshot(), shard.key); updateIndexShardSnapshotStatus(entry.snapshot(), shard.key, - new SnapshotsInProgress.ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.SUCCESS)); + new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.SUCCESS)); break; case FAILURE: logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, updating status on the master", entry.snapshot(), shard.key); updateIndexShardSnapshotStatus(entry.snapshot(), shard.key, - new SnapshotsInProgress.ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.FAILED, snapshotStatus.failure())); + new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.FAILED, snapshotStatus.failure())); break; default: throw new IllegalStateException("Unknown snapshot shard stage " + snapshotStatus.stage()); @@ -309,18 +331,18 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements @Override public void doRun() { snapshot(indexShard, entry.getKey(), indexId, shardEntry.getValue()); - updateIndexShardSnapshotStatus(entry.getKey(), shardId, new SnapshotsInProgress.ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.SUCCESS)); + updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.SUCCESS)); } @Override public void onFailure(Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e); - updateIndexShardSnapshotStatus(entry.getKey(), shardId, new SnapshotsInProgress.ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e))); + updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e))); } }); } catch (Exception e) { - updateIndexShardSnapshotStatus(entry.getKey(), shardId, new SnapshotsInProgress.ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e))); + updateIndexShardSnapshotStatus(entry.getKey(), shardId, 
new ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e))); } } } @@ -383,23 +405,23 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements if (snapshot.state() == SnapshotsInProgress.State.STARTED || snapshot.state() == SnapshotsInProgress.State.ABORTED) { Map localShards = currentSnapshotShards(snapshot.snapshot()); if (localShards != null) { - ImmutableOpenMap masterShards = snapshot.shards(); + ImmutableOpenMap masterShards = snapshot.shards(); for(Map.Entry localShard : localShards.entrySet()) { ShardId shardId = localShard.getKey(); IndexShardSnapshotStatus localShardStatus = localShard.getValue(); - SnapshotsInProgress.ShardSnapshotStatus masterShard = masterShards.get(shardId); + ShardSnapshotStatus masterShard = masterShards.get(shardId); if (masterShard != null && masterShard.state().completed() == false) { // Master knows about the shard and thinks it has not completed - if (localShardStatus.stage() == IndexShardSnapshotStatus.Stage.DONE) { + if (localShardStatus.stage() == Stage.DONE) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, updating status on the master", snapshot.snapshot(), shardId); updateIndexShardSnapshotStatus(snapshot.snapshot(), shardId, - new SnapshotsInProgress.ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.SUCCESS)); - } else if (localShard.getValue().stage() == IndexShardSnapshotStatus.Stage.FAILURE) { + new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.SUCCESS)); + } else if (localShard.getValue().stage() == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, updating status on master", snapshot.snapshot(), shardId); updateIndexShardSnapshotStatus(snapshot.snapshot(), shardId, - new SnapshotsInProgress.ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.FAILED, localShardStatus.failure())); + new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.FAILED, localShardStatus.failure())); } } @@ -427,7 +449,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements public static class UpdateIndexShardSnapshotStatusRequest extends TransportRequest { private Snapshot snapshot; private ShardId shardId; - private SnapshotsInProgress.ShardSnapshotStatus status; + private ShardSnapshotStatus status; private volatile boolean processed; // state field, no need to serialize @@ -435,7 +457,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements } - public UpdateIndexShardSnapshotStatusRequest(Snapshot snapshot, ShardId shardId, SnapshotsInProgress.ShardSnapshotStatus status) { + public UpdateIndexShardSnapshotStatusRequest(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status) { this.snapshot = snapshot; this.shardId = shardId; this.status = status; @@ -446,7 +468,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements super.readFrom(in); snapshot = new Snapshot(in); shardId = ShardId.readShardId(in); - status = SnapshotsInProgress.ShardSnapshotStatus.readShardSnapshotStatus(in); + status = new ShardSnapshotStatus(in); } @Override @@ -465,7 +487,7 @@ public class 
SnapshotShardsService extends AbstractLifecycleComponent implements return shardId; } - public SnapshotsInProgress.ShardSnapshotStatus status() { + public ShardSnapshotStatus status() { return status; } @@ -486,7 +508,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements /** * Updates the shard status */ - public void updateIndexShardSnapshotStatus(Snapshot snapshot, ShardId shardId, SnapshotsInProgress.ShardSnapshotStatus status) { + public void updateIndexShardSnapshotStatus(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status) { UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); try { if (clusterService.state().nodes().isLocalNodeElectedMaster()) { @@ -533,7 +555,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements int changedCount = 0; final List entries = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); boolean updated = false; for (int i = 0; i < batchSize; i++) { diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index ea8deea5661..f5ef5f111df 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -793,12 +793,12 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) { - // Check if we just became the master - boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); if (snapshotsInProgress == null) { return false; } + // Check if we just became the master + boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) { // We just replaced old master and snapshots in intermediate states needs to be cleaned diff --git a/core/src/main/java/org/elasticsearch/tasks/Task.java b/core/src/main/java/org/elasticsearch/tasks/Task.java index 53e5855c025..bc2e8418141 100644 --- a/core/src/main/java/org/elasticsearch/tasks/Task.java +++ b/core/src/main/java/org/elasticsearch/tasks/Task.java @@ -42,8 +42,14 @@ public class Task { private final TaskId parentTask; + /** + * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). + */ private final long startTime; + /** + * The task's start time as a relative time ({@link System#nanoTime()} style). + */ private final long startTimeNanos; public Task(long id, String type, String action, String description, TaskId parentTask) { @@ -64,20 +70,27 @@ public class Task { * Build a version of the task status you can throw over the wire and back * to the user. * - * @param node - * the node this task is running on + * @param localNodeId + * the id of the node this task is running on * @param detailed * should the information include detailed, potentially slow to * generate data? 
*/ - public TaskInfo taskInfo(DiscoveryNode node, boolean detailed) { + public final TaskInfo taskInfo(String localNodeId, boolean detailed) { String description = null; Task.Status status = null; if (detailed) { description = getDescription(); status = getStatus(); } - return new TaskInfo(new TaskId(node.getId(), getId()), getType(), getAction(), description, status, startTime, + return taskInfo(localNodeId, description, status); + } + + /** + * Build a proper {@link TaskInfo} for this task. + */ + protected final TaskInfo taskInfo(String localNodeId, String description, Status status) { + return new TaskInfo(new TaskId(localNodeId, getId()), getType(), getAction(), description, status, startTime, System.nanoTime() - startTimeNanos, this instanceof CancellableTask, parentTask); } @@ -110,7 +123,7 @@ public class Task { } /** - * Returns the task start time + * Returns the task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). */ public long getStartTime() { return startTime; @@ -136,12 +149,12 @@ public class Task { public interface Status extends ToXContent, NamedWriteable {} public TaskResult result(DiscoveryNode node, Exception error) throws IOException { - return new TaskResult(taskInfo(node, true), error); + return new TaskResult(taskInfo(node.getId(), true), error); } public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOException { if (response instanceof ToXContent) { - return new TaskResult(taskInfo(node, true), (ToXContent) response); + return new TaskResult(taskInfo(node.getId(), true), (ToXContent) response); } else { throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java b/core/src/main/java/org/elasticsearch/tasks/TaskCancelledException.java similarity index 61% rename from core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java rename to core/src/main/java/org/elasticsearch/tasks/TaskCancelledException.java index d9520aef768..4da9eb05a68 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java +++ b/core/src/main/java/org/elasticsearch/tasks/TaskCancelledException.java @@ -16,21 +16,23 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.tasks; -package org.elasticsearch.tribe; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; - -import java.util.Collection; +import java.io.IOException; /** - * An internal node that connects to a remove cluster, as part of a tribe node. 
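The two Task timestamps documented above serve different purposes, which is worth spelling out since only one of them can be subtracted meaningfully. In plain Java:

final class TaskClocks {
    public static void main(String[] args) throws InterruptedException {
        long startTime = System.currentTimeMillis(); // wall clock: reportable as "started at"
        long startTimeNanos = System.nanoTime();     // monotonic: only deltas are meaningful
        Thread.sleep(50);                            // stand-in for the task's actual work
        long runningTimeNanos = System.nanoTime() - startTimeNanos; // immune to wall-clock adjustments
        System.out.println("started at " + startTime + ", ran for " + runningTimeNanos + "ns");
    }
}

That is exactly the pair taskInfo() above reports: startTime verbatim, plus System.nanoTime() - startTimeNanos as the running time.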
+ * A generic exception that can be thrown by a task when it's cancelled by the task manager API */ -class TribeClientNode extends Node { - TribeClientNode(Settings settings, Collection> classpathPlugins) { - super(new Environment(settings), classpathPlugins); +public class TaskCancelledException extends ElasticsearchException { + + public TaskCancelledException(String msg) { + super(msg); + } + + public TaskCancelledException(StreamInput in) throws IOException{ + super(in); } } diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java index 003a51c3175..0f2165824fe 100644 --- a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -459,7 +459,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen if (cancellationReason == null) { nodesWithChildTasks.add(nodeId); } else { - throw new IllegalStateException("cannot register child task request, the task is already cancelled"); + throw new TaskCancelledException("cannot register child task request, the task is already cancelled"); } } } diff --git a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index de7dbbaefc9..9e5469fd16a 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.Node; @@ -79,7 +78,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder builders = new HashMap<>(); - final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + final int availableProcessors = EsExecutors.numberOfProcessors(settings); final int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors); final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors); final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); diff --git a/core/src/main/java/org/elasticsearch/transport/ActionTransportException.java b/core/src/main/java/org/elasticsearch/transport/ActionTransportException.java index 9b0e887c219..e35e77cba3e 100644 --- a/core/src/main/java/org/elasticsearch/transport/ActionTransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/ActionTransportException.java @@ -22,7 +22,6 @@ package org.elasticsearch.transport; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.transport.TransportAddressSerializers; import java.io.IOException; @@ -39,11 +38,7 @@ public class ActionTransportException extends TransportException { public ActionTransportException(StreamInput in) throws IOException { super(in); - if (in.readBoolean()) { - address = TransportAddressSerializers.addressFromStream(in); - } else { - address = null; - } + address = in.readOptionalWriteable(TransportAddress::new); action = in.readOptionalString(); } @@ -62,12 +57,7 @@ 
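The ActionTransportException hunks around this point replace the hand-rolled boolean presence flag with the readOptionalWriteable/writeOptionalWriteable pair, which encodes the same flag internally and keeps the two sides symmetric by construction. Condensed to the before/after shape the diff shows:

// Before: the presence flag is managed by hand on both sides.
//   read:  if (in.readBoolean()) { address = TransportAddressSerializers.addressFromStream(in); } else { address = null; }
//   write: if (address != null) { out.writeBoolean(true); TransportAddressSerializers.addressToStream(out, address); }
//          else { out.writeBoolean(false); }
// After: one symmetric call per side, TransportAddress::new acting as the Writeable reader.
address = in.readOptionalWriteable(TransportAddress::new);
out.writeOptionalWriteable(address);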
public class ActionTransportException extends TransportException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (address != null) { - out.writeBoolean(true); - TransportAddressSerializers.addressToStream(out, address); - } else { - out.writeBoolean(false); - } + out.writeOptionalWriteable(address); out.writeOptionalString(action); } diff --git a/core/src/main/java/org/elasticsearch/transport/BindTransportException.java b/core/src/main/java/org/elasticsearch/transport/BindTransportException.java index 4f55c04a1b1..0c60faec3b5 100644 --- a/core/src/main/java/org/elasticsearch/transport/BindTransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/BindTransportException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class BindTransportException extends TransportException { public BindTransportException(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/core/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index 0d36733cbbc..7c64db1a63f 100644 --- a/core/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -/** - * - */ public class ConnectTransportException extends ActionTransportException { private final DiscoveryNode node; diff --git a/core/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java index 41c2b9b23ed..c5814cf0fef 100644 --- a/core/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java @@ -21,9 +21,6 @@ package org.elasticsearch.transport; import org.elasticsearch.threadpool.ThreadPool; -/** - * - */ public class EmptyTransportResponseHandler implements TransportResponseHandler { public static final EmptyTransportResponseHandler INSTANCE_SAME = new EmptyTransportResponseHandler(ThreadPool.Names.SAME); diff --git a/core/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java b/core/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java index a5abd21082d..32dcf6700c2 100644 --- a/core/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java +++ b/core/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class NodeDisconnectedException extends ConnectTransportException { public NodeDisconnectedException(DiscoveryNode node, String action) { diff --git a/core/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java b/core/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java index 9ea621bc40d..bcca9e54b33 100644 --- a/core/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java +++ b/core/src/main/java/org/elasticsearch/transport/NodeShouldNotConnectException.java @@ -24,8 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - */ public class NodeShouldNotConnectException extends NodeNotConnectedException { public 
NodeShouldNotConnectException(DiscoveryNode fromNode, DiscoveryNode node) { diff --git a/core/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java b/core/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java index 9a838628aeb..c6b90f61d0c 100644 --- a/core/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/NotSerializableTransportException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class NotSerializableTransportException extends TransportException { public NotSerializableTransportException(Throwable t) { diff --git a/core/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java b/core/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java index 3d0e603195b..fe01a1fdbcc 100644 --- a/core/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java +++ b/core/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java @@ -27,9 +27,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -/** - * - */ public class PlainTransportFuture extends BaseFuture implements TransportFuture, TransportResponseHandler { diff --git a/core/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java b/core/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java index 6245a1afd22..88cdc52f10a 100644 --- a/core/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/ReceiveTimeoutTransportException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class ReceiveTimeoutTransportException extends ActionTransportException { public ReceiveTimeoutTransportException(DiscoveryNode node, String action, String msg) { diff --git a/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 27342af1bdd..b4a79edbc0b 100644 --- a/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/core/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -25,9 +25,6 @@ import org.elasticsearch.tasks.TaskManager; import java.io.IOException; import java.util.function.Supplier; -/** - * - */ public class RequestHandlerRegistry { private final String action; diff --git a/core/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java b/core/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java index 41c743c775e..08defb38f3b 100644 --- a/core/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/SendRequestTransportException.java @@ -25,9 +25,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class SendRequestTransportException extends ActionTransportException implements ElasticsearchWrapperException { public SendRequestTransportException(DiscoveryNode node, String action, Throwable cause) { diff --git a/core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java b/core/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java similarity index 53% rename 
from core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java rename to core/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java index c4458b7883d..12899d86d43 100644 --- a/core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java +++ b/core/src/main/java/org/elasticsearch/transport/TaskAwareTransportRequestHandler.java @@ -17,21 +17,14 @@ * under the License. */ -package org.elasticsearch.transport.local; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.AbstractSimpleTransportTestCase; - -public class SimpleLocalTransportTests extends AbstractSimpleTransportTestCase { +package org.elasticsearch.transport; +/** + * Transport request handlers that use the task context + */ +public abstract class TaskAwareTransportRequestHandler implements TransportRequestHandler { @Override - protected MockTransportService build(Settings settings, Version version) { - MockTransportService transportService = MockTransportService.local(settings, version, threadPool); - transportService.start(); - return transportService; + public final void messageReceived(T request, TransportChannel channel) throws Exception { + throw new UnsupportedOperationException("the task parameter is required"); } } diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 0dd0c05dcad..9e4e0262080 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.IOUtils; @@ -53,7 +54,6 @@ import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; @@ -61,6 +61,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -257,7 +258,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i NodeChannels channels = entry.getValue(); for (Channel channel : channels.allChannels) { try { - sendMessage(channel, pingHeader, successfulPings::inc, false); + sendMessage(channel, pingHeader, successfulPings::inc); } catch (Exception e) { if (isOpen(channel)) { logger.debug( @@ -283,7 +284,15 @@ public abstract class TcpTransport 
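The rename above turns SimpleLocalTransportTests into TaskAwareTransportRequestHandler, whose task-less messageReceived overload now always throws, so concrete subclasses must implement the task-aware overload. A sketch, with the generic parameter (stripped by this diff rendering) restored; MyRequest and the empty response are illustrative:

```java
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TaskAwareTransportRequestHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;

// Illustrative request type; any TransportRequest subclass would do.
class MyRequest extends TransportRequest {
}

class MyHandler extends TaskAwareTransportRequestHandler<MyRequest> {
    @Override
    public void messageReceived(MyRequest request, TransportChannel channel, Task task) throws Exception {
        // task is always available here; the base class rejects the task-less path.
        channel.sendResponse(TransportResponse.Empty.INSTANCE);
    }
}
```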
extends AbstractLifecycleComponent i @Override protected void onAfterInLifecycle() { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); + try { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); + } catch (EsRejectedExecutionException ex) { + if (ex.isExecutorShutdown()) { + logger.debug("couldn't schedule new ping execution, executor is shutting down", ex); + } else { + throw ex; + } + } } @Override @@ -357,6 +366,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i return Arrays.asList(recovery, bulk, reg, state, ping); } + @Override public synchronized void close() throws IOException { closeChannels(allChannels); } @@ -506,7 +516,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i @Override public boolean addressSupported(Class address) { - return InetSocketTransportAddress.class.equals(address); + return TransportAddress.class.equals(address); } @Override @@ -640,7 +650,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i for (int i = 0; i < boundAddresses.size(); i++) { InetSocketAddress boundAddress = boundAddresses.get(i); boundAddressesHostStrings[i] = boundAddress.getHostString(); - transportBoundAddresses[i] = new InetSocketTransportAddress(boundAddress); + transportBoundAddresses[i] = new TransportAddress(boundAddress); } final String[] publishHosts; @@ -658,7 +668,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } final int publishPort = resolvePublishPort(name, settings, profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); return new BoundTransportAddress(transportBoundAddresses, publishAddress); } @@ -757,7 +767,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i int limit = Math.min(ports.length, perAddressLimit); for (int i = 0; i < limit; i++) { for (InetAddress address : addresses) { - transportAddresses.add(new InetSocketTransportAddress(address, ports[i])); + transportAddresses.add(new TransportAddress(address, ports[i])); } } return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]); @@ -837,7 +847,23 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } else if (e instanceof TcpTransport.HttpOnTransportException) { // in case we are able to return data, serialize the exception content and sent it back to the client if (isOpen(channel)) { - sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), () -> {}, true); + final Runnable closeChannel = () -> { + try { + closeChannels(Collections.singletonList(channel)); + } catch (IOException e1) { + logger.debug("failed to close httpOnTransport channel", e1); + } + }; + boolean success = false; + try { + sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), closeChannel); + success = true; + } finally { + if (success == false) { + // it's fine to call this more than once + closeChannel.run(); + } + } } } else { logger.warn( @@ -871,7 +897,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i protected abstract NodeChannels connectToChannelsLight(DiscoveryNode node) throws IOException; - protected abstract void sendMessage(Channel channel, BytesReference reference, Runnable sendListener, 
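The onAfterInLifecycle hunk above makes the keep-alive pinger treat a rejected reschedule during shutdown as benign. The same guard as a self-contained sketch, with the interval and task as placeholders:

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.threadpool.ThreadPool;

// Sketch: reschedule a periodic task, treating "executor shut down" as a stop signal.
class TolerantRescheduler {
    static void reschedule(ThreadPool threadPool, TimeValue interval, Runnable task) {
        try {
            threadPool.schedule(interval, ThreadPool.Names.GENERIC, task);
        } catch (EsRejectedExecutionException ex) {
            if (ex.isExecutorShutdown() == false) {
                throw ex; // rejection on a live executor is still a real error
            }
            // otherwise the node is stopping: silently drop the next run
        }
    }
}
```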
boolean close) throws IOException; + protected abstract void sendMessage(Channel channel, BytesReference reference, Runnable sendListener) throws IOException; /** * Connects to the node in a heavy way. @@ -899,6 +925,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i byte status = 0; status = TransportStatus.setRequest(status); ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); + // we wrap this in a release once since if the onRequestSent callback throws an exception + // we might release things twice and this should be prevented + final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes())); boolean addedReleaseListener = false; StreamOutput stream = bStream; try { @@ -919,9 +948,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i stream.writeString(action); BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream, bStream); final TransportRequestOptions finalOptions = options; - Runnable onRequestSent = () -> { + Runnable onRequestSent = () -> { // this might be called in a different thread try { - Releasables.close(bStream.bytes()); + toRelease.close(); } finally { transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions); } @@ -930,7 +959,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } finally { IOUtils.close(stream); if (!addedReleaseListener) { - Releasables.close(bStream.bytes()); + toRelease.close(); } } } @@ -944,7 +973,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i private boolean internalSendMessage(Channel targetChannel, BytesReference message, Runnable onRequestSent) throws IOException { boolean success; try { - sendMessage(targetChannel, message, onRequestSent, false); + sendMessage(targetChannel, message, onRequestSent); success = true; } catch (IOException ex) { // passing exception handling to deal with this and raise disconnect events and decide the right logging level @@ -967,7 +996,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i try (BytesStreamOutput stream = new BytesStreamOutput()) { stream.setVersion(nodeVersion); RemoteTransportException tx = new RemoteTransportException( - nodeName(), new InetSocketTransportAddress(getLocalAddress(channel)), action, error); + nodeName(), new TransportAddress(getLocalAddress(channel)), action, error); threadPool.getThreadContext().writeTo(stream); stream.writeException(tx); byte status = 0; @@ -976,7 +1005,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i final BytesReference bytes = stream.bytes(); final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); Runnable onRequestSent = () -> transportServiceAdapter.onResponseSent(requestId, action, error); - sendMessage(channel, new CompositeBytesReference(header, bytes), onRequestSent, false); + sendMessage(channel, new CompositeBytesReference(header, bytes), onRequestSent); } } @@ -993,6 +1022,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i byte status = 0; status = TransportStatus.setResponse(status); // TODO share some code with sendRequest ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); + // we wrap this in a release once since if the onRequestSent callback throws an exception + // we might release things twice and this should be prevented + final Releasable toRelease = Releasables.releaseOnce(() 
-> Releasables.close(bStream.bytes())); boolean addedReleaseListener = false; StreamOutput stream = bStream; try { @@ -1005,19 +1037,24 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i BytesReference reference = buildMessage(requestId, status, nodeVersion, response, stream, bStream); final TransportResponseOptions finalOptions = options; - Runnable onRequestSent = () -> { + Runnable onRequestSent = () -> { // this might be called in a different thread try { - Releasables.close(bStream.bytes()); + toRelease.close(); } finally { transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions); } }; addedReleaseListener = internalSendMessage(channel, reference, onRequestSent); } finally { - IOUtils.close(stream); - if (!addedReleaseListener) { - Releasables.close(bStream.bytes()); + try { + IOUtils.close(stream); + } finally { + if (!addedReleaseListener) { + + toRelease.close(); + } } + } } @@ -1194,9 +1231,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } streamIn = compressor.streamInput(streamIn); } - if (version.onOrAfter(Version.CURRENT.minimumCompatibilityVersion()) == false || version.major != Version.CURRENT.major) { + if (version.onOrAfter(getCurrentVersion().minimumCompatibilityVersion()) == false + || version.major != getCurrentVersion().major) { throw new IllegalStateException("Received message from unsupported version: [" + version - + "] minimal compatible version is: [" +Version.CURRENT.minimumCompatibilityVersion() + "]"); + + "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]"); } streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry); streamIn.setVersion(version); @@ -1233,7 +1271,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, final TransportResponseHandler handler) { final TransportResponse response = handler.newInstance(); - response.remoteAddress(new InetSocketTransportAddress(remoteAddress)); + response.remoteAddress(new TransportAddress(remoteAddress)); try { response.readFrom(stream); } catch (Exception e) { @@ -1299,7 +1337,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i transportChannel = new TcpTransportChannel<>(this, channel, transportName, action, requestId, version, profileName, messageLengthBytes); final TransportRequest request = reg.newRequest(); - request.remoteAddress(new InetSocketTransportAddress(remoteAddress)); + request.remoteAddress(new TransportAddress(remoteAddress)); request.readFrom(stream); // in case we throw an exception, i.e. 
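Both send paths now route buffer release through Releasables.releaseOnce, since the completion callback can run on another thread and also races the finally block. A toy sketch of the idempotency this provides:

```java
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;

import java.util.concurrent.atomic.AtomicInteger;

class ReleaseOnceDemo {
    public static void main(String[] args) {
        AtomicInteger closes = new AtomicInteger();
        // Wrap the raw release action so only the first close() has any effect.
        Releasable once = Releasables.releaseOnce(() -> closes.incrementAndGet());
        once.close(); // releases the underlying resource
        once.close(); // no-op: exactly the double release the hunk comments warn about
        System.out.println("close actions run: " + closes.get()); // prints 1
    }
}
```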
when the limit is hit, we don't want to verify validateRequest(stream, requestId, action); diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java index 1fceb5aa1a3..9b09f7f8ed3 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java @@ -66,7 +66,7 @@ public final class TcpTransportChannel implements TransportChannel { try { transport.sendResponse(version, channel, response, requestId, action, options); } finally { - release(); + release(false); } } @@ -75,19 +75,20 @@ public final class TcpTransportChannel implements TransportChannel { try { transport.sendErrorResponse(version, channel, exception, requestId, action); } finally { - release(); + release(true); } } private Exception releaseBy; - private void release() { - // attempt to release once atomically - if (released.compareAndSet(false, true) == false) { - throw new IllegalStateException("reserved bytes are already released", releaseBy); - } else { + private void release(boolean isExceptionResponse) { + if (released.compareAndSet(false, true)) { assert (releaseBy = new Exception()) != null; // easier to debug if it's already closed + transport.getInFlightRequestBreaker().addWithoutBreaking(-reservedBytes); + } else if (isExceptionResponse == false) { + // only fail if we are not sending an error - we might send the error triggered by the previous + // sendResponse call + throw new IllegalStateException("reserved bytes are already released", releaseBy); } - transport.getInFlightRequestBreaker().addWithoutBreaking(-reservedBytes); } @Override diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index d0b2edf09bb..9a85a867888 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -32,9 +32,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - * - */ public interface Transport extends LifecycleComponent { diff --git a/core/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java b/core/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java index ad9c7991fa4..32bb9ca4ec2 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java @@ -21,9 +21,6 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; -/** - * - */ public interface TransportConnectionListener { void onNodeConnected(DiscoveryNode node); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportException.java b/core/src/main/java/org/elasticsearch/transport/TransportException.java index 7cc1c54c236..1d1878e0028 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportException.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportException.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class TransportException extends ElasticsearchException { public TransportException(Throwable cause) { super(cause); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportFuture.java b/core/src/main/java/org/elasticsearch/transport/TransportFuture.java index 
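The TcpTransportChannel hunk above permits a second release only on the error-reporting path, because sendResponse may fail and then legitimately trigger sendResponse(Exception) on the same channel. The guard reduced to a sketch:

```java
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the release guard: the first caller wins; later callers fail loudly,
// unless they are on the error-response path triggered by the first failure.
class OneShotRelease {
    private final AtomicBoolean released = new AtomicBoolean();

    void release(boolean isExceptionResponse) {
        if (released.compareAndSet(false, true)) {
            // first release: return the reserved bytes (a circuit breaker in the real code)
        } else if (isExceptionResponse == false) {
            throw new IllegalStateException("reserved bytes are already released");
        }
        // else: tolerate the double release caused by sending the error response
    }
}
```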
5d34d0c0338..32c5f700da0 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportFuture.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportFuture.java @@ -22,9 +22,6 @@ package org.elasticsearch.transport; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -/** - * - */ public interface TransportFuture extends Future { /** diff --git a/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java b/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java index d8072a81ba6..7b478ce48e6 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportInterceptor.java @@ -33,7 +33,7 @@ public interface TransportInterceptor { * {@link TransportService#registerRequestHandler(String, Supplier, String, TransportRequestHandler)}. The returned handler is * used instead of the passed in handler. By default the provided handler is returned. */ - default TransportRequestHandler interceptHandler(String action, + default TransportRequestHandler interceptHandler(String action, String executor, TransportRequestHandler actualHandler) { return actualHandler; } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportRequest.java b/core/src/main/java/org/elasticsearch/transport/TransportRequest.java index 83eb7cf0d81..54f3a228a81 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportRequest.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportRequest.java @@ -26,8 +26,6 @@ import org.elasticsearch.tasks.TaskId; import java.io.IOException; -/** - */ public abstract class TransportRequest extends TransportMessage { public static class Empty extends TransportRequest { public static final Empty INSTANCE = new Empty(); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java index 17a3f26f26c..8c90b82fe7c 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java @@ -21,9 +21,6 @@ package org.elasticsearch.transport; import org.elasticsearch.tasks.Task; -/** - * - */ public interface TransportRequestHandler { /** diff --git a/core/src/main/java/org/elasticsearch/transport/TransportResponse.java b/core/src/main/java/org/elasticsearch/transport/TransportResponse.java index b778c6f213d..25ae72a479f 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportResponse.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportResponse.java @@ -19,8 +19,6 @@ package org.elasticsearch.transport; -/** - */ public abstract class TransportResponse extends TransportMessage { public static class Empty extends TransportResponse { diff --git a/core/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java index 2602a3e1662..9d2c1801240 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java @@ -19,9 +19,6 @@ package org.elasticsearch.transport; -/** - * - */ public interface TransportResponseHandler { /** diff --git a/core/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java b/core/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java index 
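The TransportInterceptor hunk above passes interceptHandler the executor name the handler is registered with (generic parameters were flattened by this rendering). A sketch of an interceptor that inspects the new argument; the logging is illustrative only:

```java
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;

class ExecutorAwareInterceptor implements TransportInterceptor {
    @Override
    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
            String action, String executor, TransportRequestHandler<T> actualHandler) {
        // The executor name is now visible, e.g. to treat ThreadPool.Names.SAME
        // registrations differently from forking executors.
        System.out.println("registering " + action + " on executor " + executor);
        return actualHandler;
    }
}
```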
eb163641749..a36793ed5d8 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportResponseOptions.java @@ -19,9 +19,6 @@ package org.elasticsearch.transport; -/** - * - */ public class TransportResponseOptions { private final boolean compress; diff --git a/core/src/main/java/org/elasticsearch/transport/TransportSerializationException.java b/core/src/main/java/org/elasticsearch/transport/TransportSerializationException.java index ad42af9b048..ae09b23d2a1 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportSerializationException.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportSerializationException.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -/** - * - */ public class TransportSerializationException extends TransportException { public TransportSerializationException(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 8c5886f7311..842ff05409f 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -25,10 +25,10 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; @@ -46,6 +46,8 @@ import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; @@ -114,13 +116,19 @@ public class TransportService extends AbstractLifecycleComponent { private final Logger tracerLog; volatile String[] tracerLogInclude; - volatile String[] tracelLogExclude; + volatile String[] tracerLogExclude; /** if set will call requests sent to this id to shortcut and executed locally */ volatile DiscoveryNode localNode = null; - @Inject - public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor) { + /** + * Build the service. + * + * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings + * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
+ */ + public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, + @Nullable ClusterSettings clusterSettings) { super(settings); this.transport = transport; this.threadPool = threadPool; @@ -132,6 +140,10 @@ public class TransportService extends AbstractLifecycleComponent { taskManager = createTaskManager(); this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); + if (clusterSettings != null) { + clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); + clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); + } } /** @@ -159,19 +171,12 @@ public class TransportService extends AbstractLifecycleComponent { return new TaskManager(settings); } - // These need to be optional as they don't exist in the context of a transport client - @Inject(optional = true) - public void setDynamicSettings(ClusterSettings clusterSettings) { - clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); - clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); - } - void setTracerLogInclude(List tracerLogInclude) { this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); } - void setTracerLogExclude(List tracelLogExclude) { - this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY); + void setTracerLogExclude(List tracerLogExclude) { + this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY); } @Override @@ -459,6 +464,27 @@ public class TransportService extends AbstractLifecycleComponent { asyncSender.sendRequest(node, action, request, options, handler); } + public void sendChildRequest(final DiscoveryNode node, final String action, + final TransportRequest request, final Task parentTask, + final TransportResponseHandler handler) { + sendChildRequest(node, action, request, parentTask, TransportRequestOptions.EMPTY, handler); + } + + public void sendChildRequest(final DiscoveryNode node, final String action, + final TransportRequest request, final Task parentTask, + final TransportRequestOptions options, + final TransportResponseHandler handler) { + request.setParentTask(localNode.getId(), parentTask.getId()); + try { + taskManager.registerChildTask(parentTask, node.getId()); + sendRequest(node, action, request, options, handler); + } catch (TaskCancelledException ex) { + // The parent task is already cancelled - just fail the request + handler.handleException(new TransportException(ex)); + } + + } + private void sendRequestInternal(final DiscoveryNode node, final String action, final TransportRequest request, final TransportRequestOptions options, @@ -586,8 +612,8 @@ public class TransportService extends AbstractLifecycleComponent { return false; } } - if (tracelLogExclude.length > 0) { - return !Regex.simpleMatch(tracelLogExclude, action); + if (tracerLogExclude.length > 0) { + return !Regex.simpleMatch(tracerLogExclude, action); } return true; } @@ -610,7 +636,7 @@ public class TransportService extends AbstractLifecycleComponent { */ public final void registerRequestHandler(String action, Supplier requestFactory, String executor, TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, handler); + handler = interceptor.interceptHandler(action, executor, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, requestFactory, 
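The new sendChildRequest overloads above register the target node as a child of the parent task before sending, and fail the handler fast if the parent is already cancelled. A usage sketch with placeholder action name, request, and handler:

```java
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

// Sketch: fan out a request as a child of a cancellable parent task.
class ChildRequestSender {
    <T extends TransportResponse> void send(TransportService transportService, DiscoveryNode node,
                                            Task parentTask, TransportRequest request,
                                            TransportResponseHandler<T> handler) {
        // Registers node as a child of parentTask. If the parent is already cancelled,
        // handler.handleException receives a TransportException wrapping TaskCancelledException.
        transportService.sendChildRequest(node, "cluster:monitor/example[n]", request, parentTask, handler);
    }
}
```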
taskManager, handler, executor, false, true); registerRequestHandler(reg); @@ -620,7 +646,7 @@ public class TransportService extends AbstractLifecycleComponent { * Registers a new request handler * * @param action The action the request handler is associated with - * @param request The request class that will be used to constrcut new instances for streaming + * @param request The request class that will be used to construct new instances for streaming * @param executor The executor the request handling will be executed on * @param forceExecution Force execution on the executor queue and never reject it * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. @@ -630,7 +656,7 @@ public class TransportService extends AbstractLifecycleComponent { String executor, boolean forceExecution, boolean canTripCircuitBreaker, TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, handler); + handler = interceptor.interceptHandler(action, executor, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, request, taskManager, handler, executor, forceExecution, canTripCircuitBreaker); registerRequestHandler(reg); @@ -929,6 +955,7 @@ public class TransportService extends AbstractLifecycleComponent { * are invoked we restore the context. */ private static final class ContextRestoreResponseHandler implements TransportResponseHandler { + private final TransportResponseHandler delegate; private final ThreadContext.StoredContext threadContext; @@ -958,6 +985,12 @@ public class TransportService extends AbstractLifecycleComponent { public String executor() { return delegate.executor(); } + + @Override + public String toString() { + return getClass().getName() + "/" + delegate.toString(); + } + } static class DirectResponseChannel implements TransportChannel { diff --git a/core/src/main/java/org/elasticsearch/transport/Transports.java b/core/src/main/java/org/elasticsearch/transport/Transports.java index 9b4dc4d5a6a..c187e3baf23 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transports.java +++ b/core/src/main/java/org/elasticsearch/transport/Transports.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport; import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.transport.local.LocalTransport; import java.util.Arrays; @@ -38,7 +37,6 @@ public enum Transports { public static final boolean isTransportThread(Thread t) { final String threadName = t.getName(); for (String s : Arrays.asList( - LocalTransport.LOCAL_TRANSPORT_THREAD_NAME_PREFIX, HttpServerTransport.HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_SERVER_BOSS_THREAD_NAME_PREFIX, diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java deleted file mode 100644 index f65312391d6..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.local; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.NodeNotConnectedException; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.ResponseHandlerFailureTransportException; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportSerializationException; -import org.elasticsearch.transport.TransportServiceAdapter; -import org.elasticsearch.transport.Transports; -import org.elasticsearch.transport.support.TransportStatus; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; - -public class LocalTransport extends AbstractLifecycleComponent implements Transport { - - public static final String LOCAL_TRANSPORT_THREAD_NAME_PREFIX = "local_transport"; - final ThreadPool threadPool; - private final ThreadPoolExecutor workers; 
- private volatile TransportServiceAdapter transportServiceAdapter; - private volatile BoundTransportAddress boundAddress; - private volatile LocalTransportAddress localAddress; - private static final ConcurrentMap transports = newConcurrentMap(); - private static final AtomicLong transportAddressIdGenerator = new AtomicLong(); - private final ConcurrentMap connectedNodes = newConcurrentMap(); - protected final NamedWriteableRegistry namedWriteableRegistry; - private final CircuitBreakerService circuitBreakerService; - - public static final String TRANSPORT_LOCAL_ADDRESS = "transport.local.address"; - public static final String TRANSPORT_LOCAL_WORKERS = "transport.local.workers"; - public static final String TRANSPORT_LOCAL_QUEUE = "transport.local.queue"; - - public LocalTransport(Settings settings, ThreadPool threadPool, - NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super(settings); - this.threadPool = threadPool; - int workerCount = this.settings.getAsInt(TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings)); - int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1); - logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX); - this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory, - threadPool.getThreadContext()); - this.namedWriteableRegistry = namedWriteableRegistry; - this.circuitBreakerService = circuitBreakerService; - } - - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) { - return new TransportAddress[]{new LocalTransportAddress(address)}; - } - - @Override - public boolean addressSupported(Class address) { - return LocalTransportAddress.class.equals(address); - } - - @Override - protected void doStart() { - String address = settings.get(TRANSPORT_LOCAL_ADDRESS); - if (address == null) { - address = Long.toString(transportAddressIdGenerator.incrementAndGet()); - } - localAddress = new LocalTransportAddress(address); - LocalTransport previous = transports.put(localAddress, this); - if (previous != null) { - throw new ElasticsearchException("local address [" + address + "] is already bound"); - } - boundAddress = new BoundTransportAddress(new TransportAddress[] { localAddress }, localAddress); - } - - @Override - protected void doStop() { - transports.remove(localAddress); - // now, go over all the transports connected to me, and raise disconnected event - for (final LocalTransport targetTransport : transports.values()) { - for (final Map.Entry entry : targetTransport.connectedNodes.entrySet()) { - if (entry.getValue() == this) { - targetTransport.disconnectFromNode(entry.getKey()); - } - } - } - } - - @Override - protected void doClose() { - ThreadPool.terminate(workers, 10, TimeUnit.SECONDS); - } - - @Override - public void transportServiceAdapter(TransportServiceAdapter transportServiceAdapter) { - this.transportServiceAdapter = transportServiceAdapter; - } - - @Override - public BoundTransportAddress boundAddress() { - return boundAddress; - } - - @Override - public Map profileBoundAddresses() { - return Collections.emptyMap(); - } - - @Override - public boolean nodeConnected(DiscoveryNode node) { - return connectedNodes.containsKey(node); - } - - @Override - public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { - 
connectToNode(node); - } - - @Override - public void connectToNode(DiscoveryNode node) throws ConnectTransportException { - synchronized (this) { - if (connectedNodes.containsKey(node)) { - return; - } - final LocalTransport targetTransport = transports.get(node.getAddress()); - if (targetTransport == null) { - throw new ConnectTransportException(node, "Failed to connect"); - } - connectedNodes.put(node, targetTransport); - transportServiceAdapter.raiseNodeConnected(node); - } - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - synchronized (this) { - LocalTransport removed = connectedNodes.remove(node); - if (removed != null) { - transportServiceAdapter.raiseNodeDisconnected(node); - } - } - } - - @Override - public long serverOpen() { - return 0; - } - - @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, - TransportRequestOptions options) throws IOException, TransportException { - final Version version = Version.smallest(node.getVersion(), getVersion()); - - try (BytesStreamOutput stream = new BytesStreamOutput()) { - stream.setVersion(version); - - stream.writeLong(requestId); - byte status = 0; - status = TransportStatus.setRequest(status); - stream.writeByte(status); // 0 for request, 1 for response. - - threadPool.getThreadContext().writeTo(stream); - stream.writeString(action); - request.writeTo(stream); - - stream.close(); - - final LocalTransport targetTransport = connectedNodes.get(node); - if (targetTransport == null) { - throw new NodeNotConnectedException(node, "Node not connected"); - } - - final byte[] data = BytesReference.toBytes(stream.bytes()); - transportServiceAdapter.addBytesSent(data.length); - transportServiceAdapter.onRequestSent(node, requestId, action, request, options); - targetTransport.receiveMessage(version, data, action, requestId, this); - } - } - - /** - * entry point for incoming messages - * - * @param version the version used to serialize the message - * @param data message data - * @param action the action associated with this message (only used for error handling when data is not parsable) - * @param requestId requestId if the message is request (only used for error handling when data is not parsable) - * @param sourceTransport the source transport to respond to. - */ - public void receiveMessage(Version version, byte[] data, String action, @Nullable Long requestId, LocalTransport sourceTransport) { - try { - workers().execute(() -> { - ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext context = threadContext.stashContext()) { - processReceivedMessage(data, action, sourceTransport, version, requestId); - } - }); - } catch (EsRejectedExecutionException e) { - assert lifecycle.started() == false; - logger.trace("received request but shutting down. ignoring. action [{}], request id [{}]", action, requestId); - } - } - - ThreadPoolExecutor workers() { - return this.workers; - } - - CircuitBreaker inFlightRequestsBreaker() { - // We always obtain a fresh breaker to reflect changes to the breaker configuration. 
- return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); - } - - /** processes received messages, assuming thread passing and thread context have all been dealt with */ - protected void processReceivedMessage(byte[] data, String action, LocalTransport sourceTransport, Version version, - @Nullable final Long sendRequestId) { - Transports.assertTransportThread(); - try { - transportServiceAdapter.addBytesReceived(data.length); - StreamInput stream = StreamInput.wrap(data); - stream.setVersion(version); - - long requestId = stream.readLong(); - byte status = stream.readByte(); - boolean isRequest = TransportStatus.isRequest(status); - threadPool.getThreadContext().readHeaders(stream); - if (isRequest) { - handleRequest(stream, requestId, data.length, sourceTransport, version); - } else { - final TransportResponseHandler handler = transportServiceAdapter.onResponseReceived(requestId); - // ignore if its null, the adapter logs it - if (handler != null) { - if (TransportStatus.isError(status)) { - handleResponseError(stream, handler); - } else { - handleResponse(stream, sourceTransport, handler); - } - } - } - } catch (Exception e) { - if (sendRequestId != null) { - TransportResponseHandler handler = sourceTransport.transportServiceAdapter.onResponseReceived(sendRequestId); - if (handler != null) { - RemoteTransportException error = new RemoteTransportException(nodeName(), localAddress, action, e); - sourceTransport.workers().execute(() -> { - ThreadContext threadContext = sourceTransport.threadPool.getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - sourceTransport.handleException(handler, error); - } - }); - } - } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to receive message for action [{}]", action), e); - } - } - } - - private void handleRequest(StreamInput stream, long requestId, int messageLengthBytes, LocalTransport sourceTransport, - Version version) throws Exception { - stream = new NamedWriteableAwareStreamInput(stream, namedWriteableRegistry); - final String action = stream.readString(); - final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); - transportServiceAdapter.onRequestReceived(requestId, action); - if (reg != null && reg.canTripCircuitBreaker()) { - inFlightRequestsBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); - } else { - inFlightRequestsBreaker().addWithoutBreaking(messageLengthBytes); - } - final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, - requestId, version, messageLengthBytes, threadPool.getThreadContext()); - try { - if (reg == null) { - throw new ActionNotFoundTransportException("Action [" + action + "] not found"); - } - final TransportRequest request = reg.newRequest(); - request.remoteAddress(sourceTransport.boundAddress.publishAddress()); - request.readFrom(stream); - if (ThreadPool.Names.SAME.equals(reg.getExecutor())) { - //noinspection unchecked - reg.processMessageReceived(request, transportChannel); - } else { - threadPool.executor(reg.getExecutor()).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - //noinspection unchecked - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - if (lifecycleState() == Lifecycle.State.STARTED) { - // we can only send 
a response transport is started.... - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", action), inner); - } - } - } - }); - } - } catch (Exception e) { - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", action), inner); - } - - } - } - - protected void handleResponse(StreamInput buffer, LocalTransport sourceTransport, final TransportResponseHandler handler) { - buffer = new NamedWriteableAwareStreamInput(buffer, namedWriteableRegistry); - final TransportResponse response = handler.newInstance(); - response.remoteAddress(sourceTransport.boundAddress.publishAddress()); - try { - response.readFrom(buffer); - } catch (Exception e) { - handleException(handler, new TransportSerializationException( - "Failed to deserialize response of type [" + response.getClass().getName() + "]", e)); - return; - } - handleParsedResponse(response, handler); - } - - protected void handleParsedResponse(final TransportResponse response, final TransportResponseHandler handler) { - threadPool.executor(handler.executor()).execute(() -> { - try { - handler.handleResponse(response); - } catch (Exception e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - }); - } - - private void handleResponseError(StreamInput buffer, final TransportResponseHandler handler) { - Exception exception; - try { - exception = buffer.readException(); - } catch (Exception e) { - exception = new TransportSerializationException("Failed to deserialize exception response from stream", e); - } - handleException(handler, exception); - } - - private void handleException(final TransportResponseHandler handler, Exception exception) { - if (!(exception instanceof RemoteTransportException)) { - exception = new RemoteTransportException("Not a remote transport exception", null, null, exception); - } - final RemoteTransportException rtx = (RemoteTransportException) exception; - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); - } - } - - @Override - public List getLocalAddresses() { - return Collections.singletonList("0.0.0.0"); - } - - protected Version getVersion() { // for tests - return Version.CURRENT; - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java deleted file mode 100644 index 0c1e8747a12..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransportChannel.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.local; - -import org.elasticsearch.Version; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportServiceAdapter; -import org.elasticsearch.transport.support.TransportStatus; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * - */ -public class LocalTransportChannel implements TransportChannel { - - private static final String LOCAL_TRANSPORT_PROFILE = "default"; - - private final LocalTransport sourceTransport; - private final TransportServiceAdapter sourceTransportServiceAdapter; - // the transport we will *send to* - private final LocalTransport targetTransport; - private final String action; - private final long requestId; - private final Version version; - private final long reservedBytes; - private final ThreadContext threadContext; - private final AtomicBoolean closed = new AtomicBoolean(); - - public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter, - LocalTransport targetTransport, String action, long requestId, Version version, long reservedBytes, - ThreadContext threadContext) { - this.sourceTransport = sourceTransport; - this.sourceTransportServiceAdapter = sourceTransportServiceAdapter; - this.targetTransport = targetTransport; - this.action = action; - this.requestId = requestId; - this.version = version; - this.reservedBytes = reservedBytes; - this.threadContext = threadContext; - } - - @Override - public String action() { - return action; - } - - @Override - public String getProfileName() { - return LOCAL_TRANSPORT_PROFILE; - } - - @Override - public void sendResponse(TransportResponse response) throws IOException { - sendResponse(response, TransportResponseOptions.EMPTY); - } - - @Override - public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { - try (BytesStreamOutput stream = new BytesStreamOutput()) { - stream.setVersion(version); - stream.writeLong(requestId); - byte status = 0; - status = TransportStatus.setResponse(status); - stream.writeByte(status); // 0 for request, 1 for response. 
- threadContext.writeTo(stream); - response.writeTo(stream); - sendResponseData(BytesReference.toBytes(stream.bytes())); - sourceTransportServiceAdapter.onResponseSent(requestId, action, response, options); - } - } - - @Override - public void sendResponse(Exception exception) throws IOException { - BytesStreamOutput stream = new BytesStreamOutput(); - writeResponseExceptionHeader(stream); - RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), - targetTransport.boundAddress().boundAddresses()[0], action, exception); - stream.writeException(tx); - sendResponseData(BytesReference.toBytes(stream.bytes())); - sourceTransportServiceAdapter.onResponseSent(requestId, action, exception); - } - - private void sendResponseData(byte[] data) { - close(); - targetTransport.receiveMessage(version, data, action, null, sourceTransport); - } - - private void close() { - // attempt to close once atomically - if (closed.compareAndSet(false, true) == false) { - throw new IllegalStateException("Channel is already closed"); - } - sourceTransport.inFlightRequestsBreaker().addWithoutBreaking(-reservedBytes); - } - - @Override - public long getRequestId() { - return requestId; - } - - @Override - public String getChannelType() { - return "local"; - } - - private void writeResponseExceptionHeader(BytesStreamOutput stream) throws IOException { - stream.writeLong(requestId); - byte status = 0; - status = TransportStatus.setResponse(status); - status = TransportStatus.setError(status); - stream.writeByte(status); - threadContext.writeTo(stream); - } -} diff --git a/core/src/main/java/org/elasticsearch/transport/support/TransportStatus.java b/core/src/main/java/org/elasticsearch/transport/support/TransportStatus.java index 87494456ff9..29c2bfb2781 100644 --- a/core/src/main/java/org/elasticsearch/transport/support/TransportStatus.java +++ b/core/src/main/java/org/elasticsearch/transport/support/TransportStatus.java @@ -19,8 +19,6 @@ package org.elasticsearch.transport.support; -/** - */ public class TransportStatus { private static final byte STATUS_REQRES = 1 << 0; diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index fd697340cd7..69ad77fc91e 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -124,7 +124,7 @@ public class TribeService extends AbstractLifecycleComponent { if (!NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.exists(settings)) { sb.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), nodesSettings.size()); } - sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery + sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "none"); // a tribe node should not use zen discovery // nothing is going to be discovered, since no master will be elected sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); if (sb.get("cluster.name") == null) { @@ -185,7 +185,7 @@ public class TribeService extends AbstractLifecycleComponent { private final List nodes = new CopyOnWriteArrayList<>(); public TribeService(Settings settings, ClusterService clusterService, final String tribeNodeId, - Collection> classpathPlugins) { + Function clientNodeBuilder) { super(settings); this.clusterService = clusterService; Map nodesSettings = new HashMap<>(settings.getGroups("tribe", true)); @@ -193,7 +193,7 @@ public class TribeService extends 
AbstractLifecycleComponent { nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client for (Map.Entry entry : nodesSettings.entrySet()) { Settings clientSettings = buildClientSettings(entry.getKey(), tribeNodeId, settings, entry.getValue()); - nodes.add(new TribeClientNode(clientSettings, classpathPlugins)); + nodes.add(clientNodeBuilder.apply(clientSettings)); } this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); diff --git a/core/src/main/java/org/elasticsearch/watcher/WatcherHandle.java b/core/src/main/java/org/elasticsearch/watcher/WatcherHandle.java index da395c41a0b..0ef80306df2 100644 --- a/core/src/main/java/org/elasticsearch/watcher/WatcherHandle.java +++ b/core/src/main/java/org/elasticsearch/watcher/WatcherHandle.java @@ -19,9 +19,6 @@ package org.elasticsearch.watcher; -/** -* -*/ public class WatcherHandle { private final ResourceWatcherService.ResourceMonitor monitor; diff --git a/core/src/main/resources/config/favicon.ico b/core/src/main/resources/config/favicon.ico index 0eabd4c1edb..41320478b5f 100644 Binary files a/core/src/main/resources/config/favicon.ico and b/core/src/main/resources/config/favicon.ico differ diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 97ccfb31bf2..999f036d9f4 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. -grant codeBase "${codebase.lucene-core-6.2.0.jar}" { +grant codeBase "${codebase.lucene-core-6.3.0-snapshot-a66a445.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.2.0.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-6.2.0.jar}" { +grant codeBase "${codebase.lucene-misc-6.3.0-snapshot-a66a445.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; @@ -116,12 +116,16 @@ grant { // load averages on Linux permission java.io.FilePermission "/proc/loadavg", "read"; - // load averages on FreeBSD - permission java.io.FilePermission "/compat/linux/proc/loadavg", "read"; - // read max virtual memory areas permission java.io.FilePermission "/proc/sys/vm/max_map_count", "read"; // io stats on Linux permission java.io.FilePermission "/proc/diskstats", "read"; + + // control group stats on Linux + permission java.io.FilePermission "/proc/self/cgroup", "read"; + permission java.io.FilePermission "/sys/fs/cgroup/cpu", "read"; + permission java.io.FilePermission "/sys/fs/cgroup/cpu/-", "read"; + permission java.io.FilePermission "/sys/fs/cgroup/cpuacct", "read"; + permission java.io.FilePermission "/sys/fs/cgroup/cpuacct/-", "read"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 43f6b62c3c3..1c780f96933 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ 
b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.2.0.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.3.0-snapshot-a66a445.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-test-framework-6.2.0.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.randomizedtesting-runner-2.3.2.jar}" { +grant codeBase "${codebase.randomizedtesting-runner-2.4.0.jar}" { // optionally needed for access to private test methods (e.g. beforeClass) permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed to fail tests on uncaught exceptions from other threads diff --git a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java index 3c77142221d..d1e5cecf76b 100644 --- a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java +++ b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java @@ -29,8 +29,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - */ public class TruncateTokenFilterTests extends ESTestCase { public void testSimple() throws IOException { diff --git a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java index 7756933a781..324e422531b 100644 --- a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java +++ b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java @@ -30,8 +30,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - */ public class UniqueTokenFilterTests extends ESTestCase { public void testSimple() throws IOException { Analyzer analyzer = new Analyzer() { diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 94806422c17..3c727ddf6e3 100644 --- a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -53,8 +53,6 @@ import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; -/** - */ public class BlendedTermQueryTests extends ESTestCase { public void testBooleanQuery() throws IOException { Directory dir = newDirectory(); diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 0611d706acc..8a2e965a7b4 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreadsTests; import org.elasticsearch.common.util.set.Sets; @@ -51,7 +51,6 @@ import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.AlreadyExpiredException; import org.elasticsearch.index.Index; -import org.elasticsearch.index.engine.IndexFailedEngineException; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; @@ -373,11 +372,12 @@ public class ExceptionSerializationTests extends ESTestCase { } public void testActionTransportException() throws IOException { + TransportAddress transportAddress = buildNewFakeTransportAddress(); ActionTransportException ex = serialize( - new ActionTransportException("name?", new LocalTransportAddress("dead.end:666"), "ACTION BABY!", "message?", null)); + new ActionTransportException("name?", transportAddress, "ACTION BABY!", "message?", null)); assertEquals("ACTION BABY!", ex.action()); - assertEquals(new LocalTransportAddress("dead.end:666"), ex.address()); - assertEquals("[name?][local[dead.end:666]][ACTION BABY!] message?", ex.getMessage()); + assertEquals(transportAddress, ex.address()); + assertEquals("[name?][" + transportAddress.toString() +"][ACTION BABY!] message?", ex.getMessage()); } public void testSearchContextMissingException() throws IOException { @@ -399,21 +399,6 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals("TIMESTAMP", ex.timestamp()); } - public void testIndexFailedEngineException() throws IOException { - ShardId id = new ShardId("foo", "_na_", 1); - IndexFailedEngineException ex = serialize(new IndexFailedEngineException(id, "type", "id", null)); - assertEquals(ex.getShardId(), new ShardId("foo", "_na_", 1)); - assertEquals("type", ex.type()); - assertEquals("id", ex.id()); - assertNull(ex.getCause()); - - ex = serialize(new IndexFailedEngineException(null, "type", "id", new NullPointerException())); - assertNull(ex.getShardId()); - assertEquals("type", ex.type()); - assertEquals("id", ex.id()); - assertTrue(ex.getCause() instanceof NullPointerException); - } - public void testAliasesMissingException() throws IOException { AliasesNotFoundException ex = serialize(new AliasesNotFoundException("one", "two", "three")); assertEquals("aliases [one, two, three] missing", ex.getMessage()); @@ -440,16 +425,17 @@ public class ExceptionSerializationTests extends ESTestCase { } public void testConnectTransportException() throws IOException { - DiscoveryNode node = new DiscoveryNode("thenode", new LocalTransportAddress("dead.end:666"), + TransportAddress transportAddress = buildNewFakeTransportAddress(); + DiscoveryNode node = new DiscoveryNode("thenode", transportAddress, emptyMap(), emptySet(), Version.CURRENT); ConnectTransportException ex = serialize(new ConnectTransportException(node, "msg", "action", null)); - assertEquals("[][local[dead.end:666]][action] msg", ex.getMessage()); + assertEquals("[][" + transportAddress.toString() + "][action] msg", ex.getMessage()); assertEquals(node, ex.node()); assertEquals("action", ex.action()); assertNull(ex.getCause()); ex 
= serialize(new ConnectTransportException(node, "msg", "action", new NullPointerException())); - assertEquals("[][local[dead.end:666]][action] msg", ex.getMessage()); + assertEquals("[]["+ transportAddress+ "][action] msg", ex.getMessage()); assertEquals(node, ex.node()); assertEquals("action", ex.action()); assertTrue(ex.getCause() instanceof NullPointerException); @@ -678,7 +664,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(25, org.elasticsearch.script.GeneralScriptException.class); ids.put(26, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class); ids.put(27, org.elasticsearch.snapshots.SnapshotCreationException.class); - ids.put(28, org.elasticsearch.index.engine.DeleteFailedEngineException.class); + ids.put(28, org.elasticsearch.index.engine.DeleteFailedEngineException.class); //deprecated in 6.0 ids.put(29, org.elasticsearch.index.engine.DocumentMissingException.class); ids.put(30, org.elasticsearch.snapshots.SnapshotException.class); ids.put(31, org.elasticsearch.indices.InvalidAliasNameException.class); @@ -730,7 +716,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(77, org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class); ids.put(78, org.elasticsearch.action.TimestampParsingException.class); ids.put(79, org.elasticsearch.action.RoutingMissingException.class); - ids.put(80, org.elasticsearch.index.engine.IndexFailedEngineException.class); + ids.put(80, org.elasticsearch.index.engine.IndexFailedEngineException.class); //deprecated in 6.0 ids.put(81, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class); ids.put(82, org.elasticsearch.repositories.RepositoryException.class); ids.put(83, org.elasticsearch.transport.ReceiveTimeoutTransportException.class); @@ -793,6 +779,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(143, org.elasticsearch.script.ScriptException.class); ids.put(144, org.elasticsearch.cluster.NotMasterException.class); ids.put(145, org.elasticsearch.ElasticsearchStatusException.class); + ids.put(146, org.elasticsearch.tasks.TaskCancelledException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 862cccab318..5e4e3812fc5 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -19,9 +19,15 @@ package org.elasticsearch; +import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.monitor.os.OsStats; +import org.elasticsearch.index.query.SimpleQueryStringBuilder; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; @@ -95,40 +101,23 @@ public class VersionTests extends ESTestCase { } public void testTooLongVersionFromString() { - try { - Version.fromString("1.0.0.1.3"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } + Exception e = 
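// The rewrites just below replace the try { ...; fail(...); } catch idiom with the
// test framework's expectThrows, which both asserts that the exception is thrown and
// returns it so its message can still be inspected.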
expectThrows(IllegalArgumentException.class, () -> Version.fromString("1.0.0.1.3")); + assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); } public void testTooShortVersionFromString() { - try { - Version.fromString("1.0"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } - + Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("1.0")); + assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); } public void testWrongVersionFromString() { - try { - Version.fromString("WRONG.VERSION"); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } + Exception e = expectThrows(IllegalArgumentException.class, () -> Version.fromString("WRONG.VERSION")); + assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); } public void testVersionNoPresentInSettings() { - try { - Version.indexCreated(Settings.builder().build()); - fail("Expected IllegalArgumentException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), containsString("[index.version.created] is not present")); - } + Exception e = expectThrows(IllegalStateException.class, () -> Version.indexCreated(Settings.builder().build())); + assertThat(e.getMessage(), containsString("[index.version.created] is not present")); } public void testIndexCreatedVersion() { @@ -279,4 +268,23 @@ } } } + private static final Version V_20_0_0_UNRELEASED = new Version(20000099, Version.CURRENT.luceneVersion); + + // see comment in Version.java about this test + public void testUnknownVersions() { + assertUnknownVersion(V_20_0_0_UNRELEASED); + expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT)); + assertUnknownVersion(AliasFilter.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(OsStats.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(SimpleQueryStringBuilder.V_5_1_0_UNRELEASED); + assertUnknownVersion(QueryStringQueryBuilder.V_5_1_0_UNRELEASED); + // once we released 5.1.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(Script.V_5_1_0_UNRELEASED); + } + + public static void assertUnknownVersion(Version version) { + assertFalse("Version " + version + " has been released, don't use a new instance of this version", + VersionUtils.allVersions().contains(version)); + } } diff --git a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java new file mode 100644 index 00000000000..1bc895095ba --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.explain.ExplainRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +public class ExplainRequestTests extends ESTestCase { + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception { + super.setUp(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + + public void testSerialize() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ExplainRequest request = new ExplainRequest("index", "type", "id"); + request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"})); + request.preference("the_preference"); + request.query(QueryBuilders.termQuery("field", "value")); + request.storedFields(new String[] {"field1", "field2"}); + request.routing("some_routing"); + request.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + ExplainRequest readRequest = new ExplainRequest(); + readRequest.readFrom(in); + assertEquals(request.filteringAlias(), readRequest.filteringAlias()); + assertArrayEquals(request.storedFields(), readRequest.storedFields()); + assertEquals(request.preference(), readRequest.preference()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.routing(), readRequest.routing()); + assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); + } + } + } + + // BWC 
test for changes from #20916 + public void testSerialize50Request() throws IOException { + ExplainRequest request = new ExplainRequest("index", "type", "id"); + request.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1.*"}, new String[] {"field2.*"})); + request.filteringAlias(new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"})); + request.preference("the_preference"); + request.query(QueryBuilders.termQuery("field", "value")); + request.storedFields(new String[] {"field1", "field2"}); + request.routing("some_routing"); + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAABBWluZGV4BHR5cGUCaWQBDHNvbWVfcm91dGluZwEOdGhlX3ByZWZlcmVuY2UEdGVybT" + + "+AAAAABWZpZWxkFQV2YWx1ZQIGYWxpYXMwBmFsaWFzMQECBmZpZWxkMQZmaWVsZDIBAQEIZmllbGQxLioBCGZpZWxkMi4qAA")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(Version.V_5_0_0); + ExplainRequest readRequest = new ExplainRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertArrayEquals(request.filteringAlias().getAliases(), readRequest.filteringAlias().getAliases()); + expectThrows(IllegalStateException.class, () -> readRequest.filteringAlias().getQueryBuilder()); + assertArrayEquals(request.storedFields(), readRequest.storedFields()); + assertEquals(request.preference(), readRequest.preference()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.routing(), readRequest.routing()); + assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(Version.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index d1d01610f18..e4a6eef33eb 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -78,6 +78,7 @@ import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.NetworkPlugin; @@ -85,8 +86,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -258,7 +259,7 @@ public class IndicesRequestIT extends ESIntegTestCase { String indexOrAlias = randomIndexOrAlias(); client().prepareIndex(indexOrAlias, "type", "id").setSource("field", "value").get(); UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "type", "id") - .script(new Script("ctx.op='delete'", 
ScriptService.ScriptType.INLINE, CustomScriptPlugin.NAME, Collections.emptyMap())); + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op='delete'", Collections.emptyMap())); UpdateResponse updateResponse = internalCluster().coordOnlyNodeClient().update(updateRequest).actionGet(); assertEquals(DocWriteResponse.Result.DELETED, updateResponse.getResult()); @@ -742,7 +743,7 @@ public class IndicesRequestIT extends ESIntegTestCase { public static class TestPlugin extends Plugin implements NetworkPlugin { public final InterceptingTransportService instance = new InterceptingTransportService(); @Override - public List getTransportInterceptors() { + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry) { return Collections.singletonList(instance); } } @@ -752,7 +753,7 @@ public class IndicesRequestIT extends ESIntegTestCase { private final Map> requests = new HashMap<>(); @Override - public TransportRequestHandler interceptHandler(String action, + public TransportRequestHandler interceptHandler(String action, String executor, TransportRequestHandler actualHandler) { return new InterceptingRequestHandler<>(action, actualHandler); } diff --git a/core/src/test/java/org/elasticsearch/action/ListenerActionIT.java b/core/src/test/java/org/elasticsearch/action/ListenerActionIT.java index 93389f898e9..60e4b4d2622 100644 --- a/core/src/test/java/org/elasticsearch/action/ListenerActionIT.java +++ b/core/src/test/java/org/elasticsearch/action/ListenerActionIT.java @@ -28,8 +28,6 @@ import org.elasticsearch.test.ESIntegTestCase; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -/** - */ public class ListenerActionIT extends ESIntegTestCase { public void testThreadedListeners() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); diff --git a/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java index 6745906488a..e8db648111c 100644 --- a/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -36,8 +36,6 @@ import java.util.concurrent.CountDownLatch; import static org.hamcrest.Matchers.equalTo; -/** - */ @ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2) public class RejectionActionIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java new file mode 100644 index 00000000000..c1d18146a08 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.validate.query.ShardValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchRequestParsers; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +public class ShardValidateQueryRequestTests extends ESTestCase { + + protected NamedWriteableRegistry namedWriteableRegistry; + protected SearchRequestParsers searchRequestParsers; + public void setUp() throws Exception { + super.setUp(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + searchRequestParsers = searchModule.getSearchRequestParsers(); + } + + + public void testSerialize() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); + validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); + validateQueryRequest.rewrite(true); + validateQueryRequest.explain(false); + validateQueryRequest.types("type1", "type2"); + ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), + new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); + request.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); + readRequest.readFrom(in); + assertEquals(request.filteringAliases(), readRequest.filteringAliases()); + assertArrayEquals(request.types(), readRequest.types()); + assertEquals(request.explain(), readRequest.explain()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.rewrite(), readRequest.rewrite()); + assertEquals(request.shardId(), readRequest.shardId()); + } + } + } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest("indices"); + validateQueryRequest.query(QueryBuilders.termQuery("field", "value")); + validateQueryRequest.rewrite(true); + validateQueryRequest.explain(false); + validateQueryRequest.types("type1", "type2"); + 
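// At this exact step the test pins the 5.0 wire format: it serializes the same
// logical request again and compares the bytes against a base64 blob captured
// earlier (note the in.setVersion(Version.V_5_0_0) below). A minimal, runnable
// sketch of that pinning pattern, using a stand-in string payload because the
// real writeTo() output is not reproduced here:
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;

final class WireFormatPinSketch {
    public static void main(String[] args) {
        // Stand-in for the bytes a request's writeTo() would produce.
        byte[] fresh = "index|type1,type2|filter_field:value".getBytes(StandardCharsets.UTF_8);
        // Captured once against the old version, then hard-coded into the test.
        String pinned = Base64.getEncoder().encodeToString(fresh);
        byte[] expected = Base64.getDecoder().decode(pinned);
        if (!Arrays.equals(fresh, expected)) {
            throw new AssertionError("wire format drifted from the pinned bytes");
        }
        System.out.println("wire format stable");
    }
}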
ShardValidateQueryRequest request = new ShardValidateQueryRequest(new ShardId("index", "foobar", 1), + new AliasFilter(QueryBuilders.termQuery("filter_field", "value"), new String[] {"alias0", "alias1"}), validateQueryRequest); + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAVpbmRleAZmb29iYXIBAQdpbmRpY2VzBAR0ZXJtP4AAAAAFZmllbGQVBXZhbHVlAgV0eXBlMQV0eXBlMgIGYWxpYXMwBmFsaWFzMQABAA")); + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(Version.V_5_0_0); + ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertArrayEquals(request.filteringAliases().getAliases(), readRequest.filteringAliases().getAliases()); + expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases().getQueryBuilder()); + assertArrayEquals(request.types(), readRequest.types()); + assertEquals(request.explain(), readRequest.explain()); + assertEquals(request.query(), readRequest.query()); + assertEquals(request.rewrite(), readRequest.rewrite()); + assertEquals(request.shardId(), readRequest.shardId()); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(Version.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 97c1a20c33f..23fdf3499b2 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Requests; @@ -31,7 +30,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; -import java.util.List; import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -47,14 +45,11 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { @TestLogging("_root:DEBUG") public void testDelayShards() throws Exception { logger.info("--> starting 3 nodes"); - List nodes = internalCluster().startNodesAsync(3).get(); + internalCluster().startNodesAsync(3).get(); // Wait for all 3 nodes to be up logger.info("--> waiting for 3 nodes to be up"); - assertBusy(() -> { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); - assertThat(resp.getNodes().size(), equalTo(3)); - }); + ensureStableCluster(3); logger.info("--> creating 'test' index"); assertAcked(prepareCreate("test").setSettings(Settings.builder() @@ -66,7 +61,8 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { logger.info("--> stopping a random node"); assertTrue(internalCluster().stopRandomDataNode()); - ensureYellow("test"); + logger.info("--> waiting for the master to remove the stopped node from the 
cluster state"); + ensureStableCluster(2); ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get(); ClusterAllocationExplanation cae = resp.getExplanation(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java index 577f73a89ed..d656702f9cd 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -67,7 +66,7 @@ public final class ClusterAllocationExplanationTests extends ESTestCase { .numberOfShards(1) .numberOfReplicas(1) .build(); - private DiscoveryNode node = new DiscoveryNode("node-0", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + private DiscoveryNode node = new DiscoveryNode("node-0", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); private static Decision.Multi yesDecision = new Decision.Multi(); private static Decision.Multi noDecision = new Decision.Multi(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 04905de18c2..6913abd81a4 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -22,9 +22,8 @@ package org.elasticsearch.action.admin.cluster.node.stats; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; +import org.elasticsearch.discovery.zen.PendingClusterStateStats; import org.elasticsearch.http.HttpStats; import org.elasticsearch.indices.breaker.AllCircuitBreakerStats; import org.elasticsearch.indices.breaker.CircuitBreakerStats; @@ -73,6 +72,30 @@ public class NodeStatsTests extends ESTestCase { assertEquals(nodeStats.getOs().getMem().getFreePercent(), deserializedNodeStats.getOs().getMem().getFreePercent()); assertEquals(nodeStats.getOs().getMem().getUsedPercent(), deserializedNodeStats.getOs().getMem().getUsedPercent()); assertEquals(nodeStats.getOs().getCpu().getPercent(), deserializedNodeStats.getOs().getCpu().getPercent()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuAcctControlGroup(), + deserializedNodeStats.getOs().getCgroup().getCpuAcctControlGroup()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuAcctUsageNanos(), + deserializedNodeStats.getOs().getCgroup().getCpuAcctUsageNanos()); + assertEquals( + 
nodeStats.getOs().getCgroup().getCpuControlGroup(), + deserializedNodeStats.getOs().getCgroup().getCpuControlGroup()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuCfsPeriodMicros(), + deserializedNodeStats.getOs().getCgroup().getCpuCfsPeriodMicros()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuCfsQuotaMicros(), + deserializedNodeStats.getOs().getCgroup().getCpuCfsQuotaMicros()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuStat().getNumberOfElapsedPeriods(), + deserializedNodeStats.getOs().getCgroup().getCpuStat().getNumberOfElapsedPeriods()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuStat().getNumberOfTimesThrottled(), + deserializedNodeStats.getOs().getCgroup().getCpuStat().getNumberOfTimesThrottled()); + assertEquals( + nodeStats.getOs().getCgroup().getCpuStat().getTimeThrottledNanos(), + deserializedNodeStats.getOs().getCgroup().getCpuStat().getTimeThrottledNanos()); assertArrayEquals(nodeStats.getOs().getCpu().getLoadAverage(), deserializedNodeStats.getOs().getCpu().getLoadAverage(), 0); } @@ -255,7 +278,7 @@ public class NodeStatsTests extends ESTestCase { } private static NodeStats createNodeStats() { - DiscoveryNode node = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(), + DiscoveryNode node = new DiscoveryNode("test_node", buildNewFakeTransportAddress(), emptyMap(), emptySet(), VersionUtils.randomVersion(random())); OsStats osStats = null; if (frequently()) { @@ -265,7 +288,14 @@ public class NodeStatsTests extends ESTestCase { } osStats = new OsStats(System.currentTimeMillis(), new OsStats.Cpu(randomShort(), loadAverages), new OsStats.Mem(randomLong(), randomLong()), - new OsStats.Swap(randomLong(), randomLong())); + new OsStats.Swap(randomLong(), randomLong()), + new OsStats.Cgroup( + randomAsciiOfLength(8), + randomPositiveLong(), + randomAsciiOfLength(8), + randomPositiveLong(), + randomPositiveLong(), + new OsStats.Cgroup.CpuStat(randomPositiveLong(), randomPositiveLong(), randomPositiveLong()))); } ProcessStats processStats = frequently() ? 
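// frequently() is a randomized-testing helper that returns true on most runs, so
// each stats object here is occasionally left null and the serialization round-trip
// is exercised with missing sections as well.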
new ProcessStats(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), new ProcessStats.Cpu(randomShort(), randomPositiveLong()), diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 9027b3d372e..6d0a0824490 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks; import com.carrotsearch.randomizedtesting.RandomizedContext; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -35,6 +35,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -168,7 +169,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { try { awaitBusy(() -> { if (((CancellableTask) task).isCancelled()) { - throw new RuntimeException("Cancelled"); + throw new TaskCancelledException("Cancelled"); } return false; }); @@ -379,9 +380,9 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Introduce an additional pseudo random repeatable race conditions String delayName = RandomizedContext.current().getRunnerSeedAsString() + ":" + nodeId + ":" + name; Random random = new Random(delayName.hashCode()); - if (RandomInts.randomIntBetween(random, 0, 10) < 1) { + if (RandomNumbers.randomIntBetween(random, 0, 10) < 1) { try { - Thread.sleep(RandomInts.randomIntBetween(random, 20, 50)); + Thread.sleep(RandomNumbers.randomIntBetween(random, 20, 50)); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java index 656a5ab9ec4..5046a48b938 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.node.tasks; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.tasks.Task; @@ -37,26 +36,26 @@ import java.util.stream.Collectors; public class RecordingTaskManagerListener implements MockTaskManagerListener { private String[] actionMasks; - private DiscoveryNode localNode; + private String localNodeId; private List> events = new ArrayList<>(); - public RecordingTaskManagerListener(DiscoveryNode localNode, String... actionMasks) { + public RecordingTaskManagerListener(String localNodeId, String... 
actionMasks) { this.actionMasks = actionMasks; - this.localNode = localNode; + this.localNodeId = localNodeId; } @Override public synchronized void onTaskRegistered(Task task) { if (Regex.simpleMatch(actionMasks, task.getAction())) { - events.add(new Tuple<>(true, task.taskInfo(localNode, true))); + events.add(new Tuple<>(true, task.taskInfo(localNodeId, true))); } } @Override public synchronized void onTaskUnregistered(Task task) { if (Regex.simpleMatch(actionMasks, task.getAction())) { - events.add(new Tuple<>(false, task.taskInfo(localNode, true))); + events.add(new Tuple<>(false, task.taskInfo(localNodeId, true))); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index c457d3a30fa..19e38343efa 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -18,13 +18,6 @@ */ package org.elasticsearch.action.admin.cluster.node.tasks; -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; - import org.elasticsearch.Version; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; @@ -44,19 +37,28 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTcpTransport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; @@ -168,8 +170,9 @@ public abstract class TaskManagerTestCase extends ESTestCase { public TestNode(String name, ThreadPool threadPool, Settings settings) { clusterService = createClusterService(threadPool); transportService = new TransportService(settings, - new LocalTransport(settings, threadPool, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR) { + new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), + new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(settings, Collections.emptyList())), + threadPool, 
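// The test transport wiring now runs over MockTcpTransport instead of the removed
// LocalTransport; the remaining constructor arguments follow, and the trailing null
// appears to leave an optional last argument of the new signature unset.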
TransportService.NOOP_TRANSPORT_INTERCEPTOR, null) { @Override protected TaskManager createTaskManager() { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { @@ -228,7 +231,7 @@ public abstract class TaskManagerTestCase extends ESTestCase { public static RecordingTaskManagerListener[] setupListeners(TestNode[] nodes, String... actionMasks) { RecordingTaskManagerListener[] listeners = new RecordingTaskManagerListener[nodes.length]; for (int i = 0; i < nodes.length; i++) { - listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode, actionMasks); + listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode.getId(), actionMasks); ((MockTaskManager) (nodes[i].transportService.getTaskManager())).addListener(listeners[i]); } return listeners; diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 134477cc204..3f8119e28a0 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -102,8 +102,10 @@ public class TasksIT extends ESIntegTestCase { private Map, RecordingTaskManagerListener> listeners = new HashMap<>(); @Override - protected boolean addMockTransportService() { - return false; + protected Collection> getMockPlugins() { + Collection> mockPlugins = new ArrayList<>(super.getMockPlugins()); + mockPlugins.remove(MockTransportService.TestPlugin.class); + return mockPlugins; } @Override @@ -780,7 +782,7 @@ public class TasksIT extends ESIntegTestCase { private void registerTaskManageListeners(String actionMasks) { for (String nodeName : internalCluster().getNodeNames()) { DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode(); - RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, actionMasks.split(",")); + RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node.getId(), actionMasks.split(",")); ((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener); RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.getName(), actionMasks), listener); assertNull(oldListener); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index e91105291e3..340d7199234 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -438,9 +438,9 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin { } @Override - protected UnblockTestTaskResponse taskOperation(UnblockTestTasksRequest request, Task task) { + protected void taskOperation(UnblockTestTasksRequest request, Task task, ActionListener listener) { ((TestTask) task).unblock(); - return new UnblockTestTaskResponse(); + listener.onResponse(new UnblockTestTaskResponse()); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index f964eeba9f7..fbe0302e5d9 100644 --- 
a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -47,7 +47,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.action.admin.cluster.RestListTasksAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; @@ -66,12 +65,12 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import static org.elasticsearch.action.support.PlainActionFuture.newFuture; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; public class TransportTasksActionTests extends TaskManagerTestCase { @@ -536,7 +535,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Try to cancel main task using action name CancelTasksRequest request = new CancelTasksRequest(); - request.setNodesIds(testNodes[0].discoveryNode.getId()); + request.setNodes(testNodes[0].discoveryNode.getId()); request.setReason("Testing Cancellation"); request.setActions(actionName); CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request) @@ -627,13 +626,34 @@ public class TransportTasksActionTests extends TaskManagerTestCase { tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override - protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) { + protected void taskOperation(TestTasksRequest request, Task task, ActionListener listener) { logger.info("Task action on node {}", node); if (failTaskOnNode == node && task.getParentTaskId().isSet()) { logger.info("Failing on node {}", node); - throw new RuntimeException("Task level failure"); + // Fail in a random way to make sure we can handle all these ways + Runnable failureMode = randomFrom( + () -> { + logger.info("Throwing exception from taskOperation"); + throw new RuntimeException("Task level failure (direct)"); + }, + () -> { + logger.info("Calling listener synchronously with exception from taskOperation"); + listener.onFailure(new RuntimeException("Task level failure (sync listener)")); + }, + () -> { + logger.info("Calling listener asynchronously with exception from taskOperation"); + threadPool.generic() + .execute(() -> listener.onFailure(new RuntimeException("Task level failure (async listener)"))); + } + ); + failureMode.run(); + } else { + if (randomBoolean()) { + listener.onResponse(new TestTaskResponse("Success on node (sync)" + node)); + } else { + threadPool.generic().execute(() -> listener.onResponse(new TestTaskResponse("Success on node (async)" + node))); + } } - return new TestTaskResponse("Success on node " + node); } }; } @@ -643,10 +663,10 @@ public class TransportTasksActionTests extends TaskManagerTestCase { TestTasksRequest testTasksRequest = new TestTasksRequest(); 
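// The taskOperation refactor in this hunk replaces a returned response with listener
// completion, which may happen inline or on another thread, so the test randomizes
// both paths. A minimal, runnable sketch of that callback contract follows; this
// ActionListener is a simplified stand-in for the Elasticsearch interface, not its
// real definition.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class ListenerSketch {
    interface ActionListener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    static void taskOperation(ActionListener<String> listener, ExecutorService pool, boolean async) {
        if (async) {
            pool.execute(() -> listener.onResponse("Success (async)")); // completes later, off the calling thread
        } else {
            listener.onResponse("Success (sync)"); // completes inline, before taskOperation returns
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CountDownLatch latch = new CountDownLatch(2);
        ActionListener<String> listener = new ActionListener<String>() {
            @Override public void onResponse(String r) { System.out.println(r); latch.countDown(); }
            @Override public void onFailure(Exception e) { e.printStackTrace(); latch.countDown(); }
        };
        taskOperation(listener, pool, false);
        taskOperation(listener, pool, true);
        latch.await(); // both completions observed, whichever thread delivered them
        pool.shutdown();
    }
}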
testTasksRequest.setActions("testAction[n]"); // pick all test actions TestTasksResponse response = tasksActions[0].execute(testTasksRequest).get(); + assertThat(response.getTaskFailures(), hasSize(1)); // one task failed + assertThat(response.getTaskFailures().get(0).getReason(), containsString("Task level failure")); // Get successful responses from all nodes except one assertEquals(testNodes.length - 1, response.tasks.size()); - assertEquals(1, response.getTaskFailures().size()); // one task failed - assertThat(response.getTaskFailures().get(0).getReason(), containsString("Task level failure")); assertEquals(0, response.getNodeFailures().size()); // no nodes failed // Release all node tasks and wait for response @@ -698,8 +718,12 @@ public class TransportTasksActionTests extends TaskManagerTestCase { } @Override - protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) { - return new TestTaskResponse(testNodes[node].getNodeId()); + protected void taskOperation(TestTasksRequest request, Task task, ActionListener listener) { + if (randomBoolean()) { + listener.onResponse(new TestTaskResponse(testNodes[node].getNodeId())); + } else { + threadPool.generic().execute(() -> listener.onResponse(new TestTaskResponse(testNodes[node].getNodeId()))); + } } }; } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index c8133ba7ba8..e1205840976 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -58,7 +58,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; */ public class ClusterRerouteRequestTests extends ESTestCase { private static final int ROUNDS = 30; - private final List> RANDOM_COMMAND_GENERATORS = unmodifiableList(Arrays.asList( + private final List> RANDOM_COMMAND_GENERATORS = unmodifiableList( + Arrays.> asList( () -> new AllocateReplicaAllocationCommand(randomAsciiOfLengthBetween(2, 10), between(0, 1000), randomAsciiOfLengthBetween(2, 10)), () -> new AllocateEmptyPrimaryAllocationCommand(randomAsciiOfLengthBetween(2, 10), between(0, 1000), diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 6566eb96db8..af5909005ae 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -20,13 +20,21 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.indices.IndexAlreadyExistsException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; +import java.util.Collection; +import java.util.Collections; import 
java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -35,6 +43,12 @@ import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class RolloverIT extends ESIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + + public void testRolloverOnEmptyIndex() throws Exception { assertAcked(prepareCreate("test_index-1").addAlias(new Alias("test_alias")).get()); final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get(); @@ -116,7 +130,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias") .addMaxIndexAgeCondition(TimeValue.timeValueHours(4)).get(); assertThat(response.getOldIndex(), equalTo("test_index-0")); - assertThat(response.getNewIndex(), equalTo("test_index-0")); + assertThat(response.getNewIndex(), equalTo("test_index-000001")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(false)); assertThat(response.getConditionStatus().size(), equalTo(1)); @@ -161,4 +175,47 @@ public class RolloverIT extends ESIntegTestCase { assertThat(e.getIndex().getName(), equalTo("test_index-000001")); } } + + public void testRolloverWithDateMath() { + DateTime now = new DateTime(DateTimeZone.UTC); + String index = "test-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now) + "-1"; + String dateMathExp = "<test-{now/d}-1>"; + assertAcked(prepareCreate(dateMathExp).addAlias(new Alias("test_alias")).get()); + ensureGreen(index); + // now we modify the provided name such that we can test that the pattern is carried on + client().admin().indices().prepareClose(index).get(); + client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, + "<test-{now/M{YYYY.MM}}-1>")).get(); + + client().admin().indices().prepareOpen(index).get(); + ensureGreen(index); + RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get(); + assertThat(response.getOldIndex(), equalTo(index)); + assertThat(response.getNewIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000002")); + assertThat(response.isDryRun(), equalTo(false)); + assertThat(response.isRolledOver(), equalTo(true)); + assertThat(response.getConditionStatus().size(), equalTo(0)); + + response = client().admin().indices().prepareRolloverIndex("test_alias").get(); + assertThat(response.getOldIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000002")); + assertThat(response.getNewIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000003")); + assertThat(response.isDryRun(), equalTo(false)); + assertThat(response.isRolledOver(), equalTo(true)); + assertThat(response.getConditionStatus().size(), equalTo(0)); + + GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(response.getOldIndex(), + response.getNewIndex()).get(); + assertEquals("<test-{now/M{YYYY.MM}}-000002>", getSettingsResponse.getSetting(response.getOldIndex(), + IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + assertEquals("<test-{now/M{YYYY.MM}}-000003>", getSettingsResponse.getSetting(response.getNewIndex(), + IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + + response = client().admin().indices().prepareRolloverIndex("test_alias").setNewIndexName("<test-{now/d}-000004>").get(); + assertThat(response.getOldIndex(), equalTo("test-" +
DateTimeFormat.forPattern("YYYY.MM").print(now) + "-000003")); + assertThat(response.getNewIndex(), equalTo("test-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now) + "-000004")); + assertThat(response.isDryRun(), equalTo(false)); + assertThat(response.isRolledOver(), equalTo(true)); + assertThat(response.getConditionStatus().size(), equalTo(0)); + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 2bd4c2883fc..9e80e92a281 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; @@ -33,6 +34,9 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; import java.util.HashSet; import java.util.List; @@ -154,15 +158,20 @@ public class TransportRolloverActionTests extends ESTestCase { public void testGenerateRolloverIndexName() throws Exception { String invalidIndexName = randomAsciiOfLength(10) + "A"; + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); expectThrows(IllegalArgumentException.class, () -> - TransportRolloverAction.generateRolloverIndexName(invalidIndexName)); + TransportRolloverAction.generateRolloverIndexName(invalidIndexName, indexNameExpressionResolver)); int num = randomIntBetween(0, 100); final String indexPrefix = randomAsciiOfLength(10); String indexEndingInNumbers = indexPrefix + "-" + num; - assertThat(TransportRolloverAction.generateRolloverIndexName(indexEndingInNumbers), + assertThat(TransportRolloverAction.generateRolloverIndexName(indexEndingInNumbers, indexNameExpressionResolver), equalTo(indexPrefix + "-" + String.format(Locale.ROOT, "%06d", num + 1))); - assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-1"), equalTo("index-name-000002")); - assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-2"), equalTo("index-name-000003")); + assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-1", indexNameExpressionResolver), + equalTo("index-name-000002")); + assertThat(TransportRolloverAction.generateRolloverIndexName("index-name-2", indexNameExpressionResolver), + equalTo("index-name-000003")); + assertEquals( "", TransportRolloverAction.generateRolloverIndexName("", + indexNameExpressionResolver)); } public void testCreateIndexRequest() throws Exception { @@ -179,7 +188,7 @@ public class TransportRolloverActionTests extends ESTestCase { .build(); rolloverRequest.getCreateIndexRequest().settings(settings); final CreateIndexClusterStateUpdateRequest createIndexRequest = - 
TransportRolloverAction.prepareCreateIndexRequest(rolloverIndex, rolloverRequest); + TransportRolloverAction.prepareCreateIndexRequest(rolloverIndex, rolloverIndex, rolloverRequest); assertThat(createIndexRequest.settings(), equalTo(settings)); assertThat(createIndexRequest.index(), equalTo(rolloverIndex)); assertThat(createIndexRequest.cause(), equalTo("rollover_index")); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index 9705009a044..95b847dd658 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -53,8 +52,8 @@ public class IndicesShardStoreResponseTests extends ESTestCase { List<IndicesShardStoresResponse.Failure> failures = new ArrayList<>(); ImmutableOpenIntMap.Builder<List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = ImmutableOpenIntMap.builder(); - DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); List<IndicesShardStoresResponse.StoreStatus> storeStatusList = new ArrayList<>(); storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, ShardStateMetaData.NO_VERSION, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); @@ -122,7 +121,7 @@ public class IndicesShardStoreResponseTests extends ESTestCase { } public void testStoreStatusOrdering() throws Exception { - DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); List<IndicesShardStoresResponse.StoreStatus> orderedStoreStatuses = new ArrayList<>(); orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java index 0c5164aec5b..5446d74911d 100644 ---
a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -144,7 +143,7 @@ public class TransportShrinkActionTests extends ESTestCase { } private DiscoveryNode newNode(String nodeId) { - return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), emptyMap(), + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA))), Version.CURRENT); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java new file mode 100644 index 00000000000..69c6731aa15 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.template; + +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; + +/** + * Rudimentary tests that the templates used by Logstash and Beats + * prior to their 5.x releases work for newly created indices + */ +public class BWCTemplateTests extends ESSingleNodeTestCase { + public void testBeatsTemplatesBWC() throws Exception { + String metricBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json"); + String packetBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json"); + String fileBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json"); + String winLogBeat = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json"); + client().admin().indices().preparePutTemplate("metricbeat").setSource(metricBeat).get(); + client().admin().indices().preparePutTemplate("packetbeat").setSource(packetBeat).get(); + client().admin().indices().preparePutTemplate("filebeat").setSource(fileBeat).get(); + client().admin().indices().preparePutTemplate("winlogbeat").setSource(winLogBeat).get(); + + client().prepareIndex("metricbeat-foo", "doc", "1").setSource("message", "foo").get(); + client().prepareIndex("packetbeat-foo", "doc", "1").setSource("message", "foo").get(); + client().prepareIndex("filebeat-foo", "doc", "1").setSource("message", "foo").get(); + client().prepareIndex("winlogbeat-foo", "doc", "1").setSource("message", "foo").get(); + } + + public void testLogstashTemplatesBWC() throws Exception { + String ls5x = copyToStringFromClasspath("/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json"); + client().admin().indices().preparePutTemplate("logstash-5x").setSource(ls5x).get(); + client().prepareIndex("logstash-foo", "doc", "1").setSource("message", "foo").get(); + } + +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java index 3c89a6ab744..f41fc698fc1 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; @@ -165,10 +164,9 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase { null, null, null, - null, null, null, null); MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, - new AliasValidator(Settings.EMPTY), null, null, + new AliasValidator(Settings.EMPTY), null, new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS)); final List throwables = new ArrayList<>(); @@ -189,7 +187,6 @@ public 
class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase { private List<Throwable> putTemplateDetail(PutRequest request) throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); ClusterService clusterService = getInstanceFromNode(ClusterService.class); - NodeServicesProvider nodeServicesProvider = getInstanceFromNode(NodeServicesProvider.class); MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService( Settings.EMPTY, clusterService, @@ -197,11 +194,10 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase { null, null, null, - nodeServicesProvider, null, null); MetaDataIndexTemplateService service = new MetaDataIndexTemplateService( - Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService, nodeServicesProvider, + Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService, new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS)); final List<Throwable> throwables = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index a74a3879bef..a4a5f6f5ba6 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -20,8 +20,8 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -89,7 +89,7 @@ public class BulkRequestTests extends ESTestCase { assertThat(((UpdateRequest) bulkRequest.requests().get(1)).index(), equalTo("index1")); Script script = ((UpdateRequest) bulkRequest.requests().get(1)).script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("counter += param1")); + assertThat(script.getIdOrCode(), equalTo("counter += param1")); assertThat(script.getLang(), equalTo("javascript")); Map<String, Object> scriptParams = script.getParams(); assertThat(scriptParams, notNullValue()); @@ -113,7 +113,7 @@ public class BulkRequestTests extends ESTestCase { public void testBulkAddIterable() { BulkRequest bulkRequest = Requests.bulkRequest(); - List<ActionRequest<?>> requests = new ArrayList<>(); + List<DocWriteRequest> requests = new ArrayList<>(); requests.add(new IndexRequest("test", "test", "id").source("field", "value")); requests.add(new UpdateRequest("test", "test", "id").doc("field", "value")); requests.add(new DeleteRequest("test", "test", "id")); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index c9fa93f76db..82eee3554e8 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -47,8 +47,10 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.function.Function; +import static org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.script.ScriptService.ScriptType; + +import
org.elasticsearch.script.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -120,7 +122,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(bulkItemResponse.getIndex(), equalTo("test")); } - final Script script = new Script("ctx._source.field += 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + final Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx._source.field += 1", Collections.emptyMap()); bulkResponse = client().prepareBulk() .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("1").setScript(script)) @@ -257,11 +259,14 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { bulkResponse = client().prepareBulk() .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setFields("field") - .setScript(new Script("throw script exception on unknown var", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .setScript(new Script( + ScriptType.INLINE, CustomScriptPlugin.NAME, "throw script exception on unknown var", Collections.emptyMap()))) .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setFields("field") - .setScript(new Script("ctx._source.field += 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .setScript(new Script( + ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx._source.field += 1", Collections.emptyMap()))) .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setFields("field") - .setScript(new Script("throw script exception on unknown var", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .setScript(new Script( + ScriptType.INLINE, CustomScriptPlugin.NAME, "throw script exception on unknown var", Collections.emptyMap()))) .execute().actionGet(); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -289,7 +294,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { numDocs++; // this test needs an even num of docs } - final Script script = new Script("ctx._source.counter += 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + final Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx._source.counter += 1", Collections.emptyMap()); BulkRequestBuilder builder = client().prepareBulk(); for (int i = 0; i < numDocs; i++) { @@ -309,7 +314,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(1L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(1)); @@ -347,7 +352,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(2L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + 
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L)); assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(2)); @@ -371,14 +376,14 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getVersion(), equalTo(3L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } } builder = client().prepareBulk(); for (int i = 0; i < numDocs; i++) { builder.add(client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i)) - .setScript(new Script("ctx.op = \"none\"", ScriptType.INLINE, CustomScriptPlugin.NAME, null))); + .setScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op = \"none\"", Collections.emptyMap()))); } response = builder.execute().actionGet(); assertThat(response.buildFailureMessage(), response.hasFailures(), equalTo(false)); @@ -388,13 +393,13 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } builder = client().prepareBulk(); for (int i = 0; i < numDocs; i++) { builder.add(client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i)) - .setScript(new Script("ctx.op = \"delete\"", ScriptType.INLINE, CustomScriptPlugin.NAME, null))); + .setScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op = \"delete\"", Collections.emptyMap()))); } response = builder.execute().actionGet(); assertThat(response.hasFailures(), equalTo(false)); @@ -404,7 +409,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getIndex(), equalTo("test")); assertThat(response.getItems()[i].getType(), equalTo("type1")); - assertThat(response.getItems()[i].getOpType(), equalTo("update")); + assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); for (int j = 0; j < 5; j++) { GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).get(); assertThat(getResponse.isExists(), equalTo(false)); @@ -747,12 +752,12 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertNoFailures(indexBulkItemResponse); assertThat(bulkItemResponse.getItems().length, is(6)); - assertThat(bulkItemResponse.getItems()[0].getOpType(), is("index")); - assertThat(bulkItemResponse.getItems()[1].getOpType(), is("index")); - assertThat(bulkItemResponse.getItems()[2].getOpType(), is("update")); - assertThat(bulkItemResponse.getItems()[3].getOpType(), is("update")); - assertThat(bulkItemResponse.getItems()[4].getOpType(), is("delete")); - assertThat(bulkItemResponse.getItems()[5].getOpType(), is("delete")); + assertThat(bulkItemResponse.getItems()[0].getOpType(), is(OpType.INDEX)); + assertThat(bulkItemResponse.getItems()[1].getOpType(), is(OpType.INDEX)); + 
assertThat(bulkItemResponse.getItems()[2].getOpType(), is(OpType.UPDATE)); + assertThat(bulkItemResponse.getItems()[3].getOpType(), is(OpType.UPDATE)); + assertThat(bulkItemResponse.getItems()[4].getOpType(), is(OpType.DELETE)); + assertThat(bulkItemResponse.getItems()[5].getOpType(), is(OpType.DELETE)); } private static String indexOrAlias() { @@ -797,9 +802,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(bulkResponse.hasFailures(), is(true)); BulkItemResponse[] responseItems = bulkResponse.getItems(); assertThat(responseItems.length, is(3)); - assertThat(responseItems[0].getOpType(), is("index")); - assertThat(responseItems[1].getOpType(), is("update")); - assertThat(responseItems[2].getOpType(), is("delete")); + assertThat(responseItems[0].getOpType(), is(OpType.INDEX)); + assertThat(responseItems[1].getOpType(), is(OpType.UPDATE)); + assertThat(responseItems[2].getOpType(), is(OpType.DELETE)); } // issue 9821 @@ -809,9 +814,9 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { .add(client().prepareUpdate().setIndex("INVALID.NAME").setType("type1").setId("1").setDoc("field", randomInt())) .add(client().prepareDelete().setIndex("INVALID.NAME").setType("type1").setId("1")).get(); assertThat(bulkResponse.getItems().length, is(3)); - assertThat(bulkResponse.getItems()[0].getOpType(), is("index")); - assertThat(bulkResponse.getItems()[1].getOpType(), is("update")); - assertThat(bulkResponse.getItems()[2].getOpType(), is("delete")); + assertThat(bulkResponse.getItems()[0].getOpType(), is(OpType.INDEX)); + assertThat(bulkResponse.getItems()[1].getOpType(), is(OpType.UPDATE)); + assertThat(bulkResponse.getItems()[2].getOpType(), is(OpType.DELETE)); } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 4fa640b3adc..c0e735ec33c 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; @@ -212,11 +213,11 @@ public class RetryTests extends ESTestCase { } private BulkItemResponse successfulResponse() { - return new BulkItemResponse(1, "update", new DeleteResponse()); + return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse()); } private BulkItemResponse failedResponse() { - return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); + return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); } } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 4a2f3da952d..e9e3e20f8b2 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -88,7 +88,7 @@ public class TransportBulkActionTookTests extends ESTestCase { private TransportBulkAction createAction(boolean controlled, AtomicLong 
expected) { CapturingTransport capturingTransport = new CapturingTransport(); TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY); diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index b755da59e17..76447268c7a 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.unit.TimeValue; @@ -38,8 +39,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; -/** - */ public class IndexRequestTests extends ESTestCase { public void testIndexRequestOpTypeFromString() throws Exception { String create = "create"; @@ -47,18 +46,24 @@ public class IndexRequestTests extends ESTestCase { String createUpper = "CREATE"; String indexUpper = "INDEX"; - assertThat(IndexRequest.OpType.fromString(create), equalTo(IndexRequest.OpType.CREATE)); - assertThat(IndexRequest.OpType.fromString(index), equalTo(IndexRequest.OpType.INDEX)); - assertThat(IndexRequest.OpType.fromString(createUpper), equalTo(IndexRequest.OpType.CREATE)); - assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX)); + IndexRequest indexRequest = new IndexRequest(""); + indexRequest.opType(create); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE)); + indexRequest.opType(createUpper); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE)); + indexRequest.opType(index); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX)); + indexRequest.opType(indexUpper); + assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX)); } public void testReadBogusString() { try { - IndexRequest.OpType.fromString("foobar"); + IndexRequest indexRequest = new IndexRequest(""); + indexRequest.opType("foobar"); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("opType [foobar] not allowed")); + assertThat(e.getMessage(), equalTo("opType must be 'create' or 'index', found: [foobar]")); } } diff --git a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java index 9adb51e6c44..33dd755e7fb 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/BulkRequestModifierTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; */ import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import 
org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -116,10 +116,10 @@ public class BulkRequestModifierTests extends ESTestCase { }); List originalResponses = new ArrayList<>(); - for (ActionRequest actionRequest : bulkRequest.requests()) { + for (DocWriteRequest actionRequest : bulkRequest.requests()) { IndexRequest indexRequest = (IndexRequest) actionRequest; IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.type(), indexRequest.id(), 1, 1, true); - originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType().lowercase(), indexResponse)); + originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); } bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0)); diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java index 1316c87e2aa..9dbef147c01 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestActionFilterTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -174,7 +174,7 @@ public class IngestActionFilterTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); for (int i = 0; i < numRequest; i++) { if (rarely()) { - ActionRequest request; + DocWriteRequest request; if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); } else { @@ -196,7 +196,7 @@ public class IngestActionFilterTests extends ESTestCase { verifyZeroInteractions(actionListener); int assertedRequests = 0; - for (ActionRequest actionRequest : bulkRequest.requests()) { + for (DocWriteRequest actionRequest : bulkRequest.requests()) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; assertThat(indexRequest.sourceAsMap().size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java index 50bd3771bc3..85240c4a8e6 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -79,7 +78,7 @@ public class IngestProxyActionFilterTests extends ESTestCase { if (i < ingestNodes) { roles.add(DiscoveryNode.Role.INGEST); } - DiscoveryNode node = new DiscoveryNode(nodeId, nodeId, LocalTransportAddress.buildUnique(), attributes, roles, VersionUtils.randomVersion(random())); + 
DiscoveryNode node = new DiscoveryNode(nodeId, nodeId, buildNewFakeTransportAddress(), attributes, roles, VersionUtils.randomVersion(random())); builder.add(node); if (i == totalNodes - 1) { localNode = node; @@ -89,7 +88,7 @@ public class IngestProxyActionFilterTests extends ESTestCase { ClusterService clusterService = mock(ClusterService.class); when(clusterService.localNode()).thenReturn(localNode); when(clusterService.state()).thenReturn(clusterState.build()); - transportService = new TransportService(Settings.EMPTY, null, null, interceptor); + transportService = new TransportService(Settings.EMPTY, null, null, interceptor, null); return new IngestProxyActionFilter(clusterService, transportService); } @@ -269,15 +268,4 @@ public class IngestProxyActionFilterTests extends ESTestCase { assertTrue(run.get()); } - - private static class IngestNodeMatcher extends CustomTypeSafeMatcher { - private IngestNodeMatcher() { - super("discovery node should be an ingest node"); - } - - @Override - protected boolean matchesSafely(DiscoveryNode node) { - return node.isIngestNode(); - } - } } diff --git a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java index 05dcb9d1f1d..feb33772da0 100644 --- a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java @@ -120,9 +120,10 @@ public class MainActionTests extends ESTestCase { ClusterState state = ClusterState.builder(clusterName).blocks(blocks).build(); when(clusterService.state()).thenReturn(state); - TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), new TransportService(Settings.EMPTY, - null ,null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), - mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); + TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), transportService, mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), clusterService); AtomicReference responseRef = new AtomicReference<>(); action.doExecute(new MainRequest(), new ActionListener() { @Override diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java new file mode 100644 index 00000000000..5ab35085e40 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.PlainShardIterator; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ShardSearchTransportRequest; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class SearchAsyncActionTests extends ESTestCase { + + public void testFanOutAndCollect() throws InterruptedException { + SearchRequest request = new SearchRequest(); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<TestSearchResponse> response = new AtomicReference<>(); + ActionListener<SearchResponse> responseListener = new ActionListener<SearchResponse>() { + @Override + public void onResponse(SearchResponse searchResponse) { + response.set((TestSearchResponse) searchResponse); + } + + @Override + public void onFailure(Exception e) { + logger.warn("test failed", e); + fail(e.getMessage()); + } + }; + DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + + Map<DiscoveryNode, Set<Long>> nodeToContextMap = new HashMap<>(); + AtomicInteger contextIdGenerator = new AtomicInteger(0); + GroupShardsIterator shardsIter = getShardsIter("idx", randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode); + AtomicInteger numFreedContext = new AtomicInteger(); + SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null) { + @Override + public void sendFreeContext(DiscoveryNode node, long contextId, SearchRequest request) { + numFreedContext.incrementAndGet(); + assertTrue(nodeToContextMap.containsKey(node)); + assertTrue(nodeToContextMap.get(node).remove(contextId)); + } + }; + Map<String, DiscoveryNode> lookup = new HashMap<>(); + lookup.put(primaryNode.getId(), primaryNode); + AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>(logger, transportService, lookup::get, + Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0, null) { + TestSearchResponse response = new TestSearchResponse(); + + @Override + protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<TestSearchPhaseResult> listener) { + assertTrue("shard: " + request.shardId() + " has been queried twice", response.queried.add(request.shardId())); + TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(), node); + Set<Long> ids =
nodeToContextMap.computeIfAbsent(node, (n) -> new HashSet<>()); + ids.add(testSearchPhaseResult.id); + if (randomBoolean()) { + listener.onResponse(testSearchPhaseResult); + } else { + new Thread(() -> listener.onResponse(testSearchPhaseResult)).start(); + } + } + + @Override + protected void moveToSecondPhase() throws Exception { + for (int i = 0; i < firstResults.length(); i++) { + TestSearchPhaseResult result = firstResults.get(i); + assertEquals(result.node.getId(), result.shardTarget().getNodeId()); + sendReleaseSearchContext(result.id(), result.node); + } + responseListener.onResponse(response); + latch.countDown(); + } + + @Override + protected String firstPhaseName() { + return "test"; + } + + @Override + protected Executor getExecutor() { + fail("no executor in this class"); + return null; + } + }; + asyncAction.start(); + latch.await(); + assertNotNull(response.get()); + assertFalse(nodeToContextMap.isEmpty()); + assertTrue(nodeToContextMap.containsKey(primaryNode)); + assertEquals(shardsIter.size(), numFreedContext.get()); + assertTrue(nodeToContextMap.get(primaryNode).toString(), nodeToContextMap.get(primaryNode).isEmpty()); + + } + + private GroupShardsIterator getShardsIter(String index, int numShards, boolean doReplicas, DiscoveryNode primaryNode, + DiscoveryNode replicaNode) { + ArrayList list = new ArrayList<>(); + for (int i = 0; i < numShards; i++) { + ArrayList started = new ArrayList<>(); + ArrayList initializing = new ArrayList<>(); + ArrayList unassigned = new ArrayList<>(); + + ShardRouting routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), true, + RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar")); + routing = routing.initialize(primaryNode.getId(), i + "p", 0); + routing.started(); + started.add(routing); + if (doReplicas) { + routing = ShardRouting.newUnassigned(new ShardId(new Index(index, "_na_"), i), false, + RecoverySource.PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar")); + if (replicaNode != null) { + routing = routing.initialize(replicaNode.getId(), i + "r", 0); + if (randomBoolean()) { + routing.started(); + started.add(routing); + } else { + initializing.add(routing); + } + } else { + unassigned.add(routing); // unused yet + } + } + Collections.shuffle(started, random()); + started.addAll(initializing); + list.add(new PlainShardIterator(new ShardId(new Index(index, "_na_"), i), started)); + } + return new GroupShardsIterator(list); + } + + public static class TestSearchResponse extends SearchResponse { + public final Set queried = new HashSet<>(); + } + + public static class TestSearchPhaseResult implements SearchPhaseResult { + final long id; + final DiscoveryNode node; + SearchShardTarget shardTarget; + + public TestSearchPhaseResult(long id, DiscoveryNode node) { + this.id = id; + this.node = node; + } + + @Override + public long id() { + return id; + } + + @Override + public SearchShardTarget shardTarget() { + return this.shardTarget; + } + + @Override + public void shardTarget(SearchShardTarget shardTarget) { + this.shardTarget = shardTarget; + + } + + @Override + public void readFrom(StreamInput in) throws IOException { + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 
2778a9dbf47..253f0146634 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.search; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; @@ -57,7 +56,7 @@ public class SearchPhaseControllerTests extends ESTestCase { @Before public void setup() { - searchPhaseController = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null, null); + searchPhaseController = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); } public void testSort() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java index 8e91b68d092..e483c718ab8 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java @@ -21,11 +21,14 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + public class SearchScrollRequestTests extends ESTestCase { public void testSerialization() throws Exception { @@ -58,54 +61,9 @@ public class SearchScrollRequestTests extends ESTestCase { } public void testEqualsAndHashcode() { - SearchScrollRequest firstSearchScrollRequest = createSearchScrollRequest(); - assertNotNull("search scroll request is equal to null", firstSearchScrollRequest); - assertNotEquals("search scroll request is equal to incompatible type", firstSearchScrollRequest, ""); - assertEquals("search scroll request is not equal to self", firstSearchScrollRequest, firstSearchScrollRequest); - assertEquals("same source builder's hashcode returns different values if called multiple times", - firstSearchScrollRequest.hashCode(), firstSearchScrollRequest.hashCode()); - - SearchScrollRequest secondSearchScrollRequest = copyRequest(firstSearchScrollRequest); - assertEquals("search scroll request is not equal to self", secondSearchScrollRequest, secondSearchScrollRequest); - assertEquals("search scroll request is not equal to its copy", firstSearchScrollRequest, secondSearchScrollRequest); - assertEquals("search scroll request is not symmetric", secondSearchScrollRequest, firstSearchScrollRequest); - assertEquals("search scroll request copy's hashcode is different from original hashcode", - firstSearchScrollRequest.hashCode(), secondSearchScrollRequest.hashCode()); - - SearchScrollRequest thirdSearchScrollRequest = copyRequest(secondSearchScrollRequest); - assertEquals("search scroll request is not equal to self", thirdSearchScrollRequest, thirdSearchScrollRequest); - assertEquals("search scroll request is not equal to its copy", secondSearchScrollRequest, thirdSearchScrollRequest); - assertEquals("search scroll request copy's hashcode is 
different from original hashcode", - secondSearchScrollRequest.hashCode(), thirdSearchScrollRequest.hashCode()); - assertEquals("equals is not transitive", firstSearchScrollRequest, thirdSearchScrollRequest); - assertEquals("search scroll request copy's hashcode is different from original hashcode", - firstSearchScrollRequest.hashCode(), thirdSearchScrollRequest.hashCode()); - assertEquals("equals is not symmetric", thirdSearchScrollRequest, secondSearchScrollRequest); - assertEquals("equals is not symmetric", thirdSearchScrollRequest, firstSearchScrollRequest); - - boolean changed = false; - if (randomBoolean()) { - secondSearchScrollRequest.scrollId(randomAsciiOfLengthBetween(3, 10)); - if (secondSearchScrollRequest.scrollId().equals(firstSearchScrollRequest.scrollId()) == false) { - changed = true; - } - } - if (randomBoolean()) { - secondSearchScrollRequest.scroll(randomPositiveTimeValue()); - if (secondSearchScrollRequest.scroll().equals(firstSearchScrollRequest.scroll()) == false) { - changed = true; - } - } - - if (changed) { - assertNotEquals(firstSearchScrollRequest, secondSearchScrollRequest); - assertNotEquals(firstSearchScrollRequest.hashCode(), secondSearchScrollRequest.hashCode()); - } else { - assertEquals(firstSearchScrollRequest, secondSearchScrollRequest); - assertEquals(firstSearchScrollRequest.hashCode(), secondSearchScrollRequest.hashCode()); - } + checkEqualsAndHashCode(createSearchScrollRequest(), SearchScrollRequestTests::copyRequest, SearchScrollRequestTests::mutate); } - + public static SearchScrollRequest createSearchScrollRequest() { SearchScrollRequest searchScrollRequest = new SearchScrollRequest(randomAsciiOfLengthBetween(3, 10)); searchScrollRequest.scroll(randomPositiveTimeValue()); @@ -118,4 +76,13 @@ public class SearchScrollRequestTests extends ESTestCase { result.scroll(searchScrollRequest.scroll()); return result; } + + private static SearchScrollRequest mutate(SearchScrollRequest original) { + SearchScrollRequest copy = copyRequest(original); + if (randomBoolean()) { + return copy.scrollId(original.scrollId() + "xyz"); + } else { + return copy.scroll(new TimeValue(original.scroll().keepAlive().getMillis() + 1)); + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index 9df5bc82238..8c0e0025749 100644 --- a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -57,7 +56,8 @@ public class TransportMultiSearchActionTests extends ESTestCase { when(actionFilters.filters()).thenReturn(new ActionFilter[0]); ThreadPool threadPool = new ThreadPool(settings); TaskManager taskManager = mock(TaskManager.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR) { + TransportService transportService = new TransportService(Settings.EMPTY, null, null, 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null) { @Override public TaskManager getTaskManager() { return taskManager; @@ -117,12 +117,12 @@ public class TransportMultiSearchActionTests extends ESTestCase { int numDataNodes = randomIntBetween(1, 10); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (int i = 0; i < numDataNodes; i++) { - builder.add(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(), + builder.add(new DiscoveryNode("_id" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)); } - builder.add(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(), + builder.add(new DiscoveryNode("master", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT)); - builder.add(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(), + builder.add(new DiscoveryNode("ingest", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.INGEST), Version.CURRENT)); ClusterState state = ClusterState.builder(new ClusterName("_name")).nodes(builder).build(); diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index a249a0e98ef..2f743f9bf6b 100644 --- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -49,7 +49,6 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -192,7 +191,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); setClusterState(clusterService, TEST_INDEX); @@ -248,7 +247,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { } static DiscoveryNode newNode(int nodeId) { - return new DiscoveryNode("node_" + nodeId, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + return new DiscoveryNode("node_" + nodeId, buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 87f86c3f596..73085276628 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -24,7 +24,7 @@ import 
org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.discovery.zen.fd.FaultDetection; +import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; @@ -43,9 +43,13 @@ import java.util.concurrent.CyclicBarrier; import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode public class IndexingMasterFailoverIT extends ESIntegTestCase { + @Override + protected boolean addMockZenPings() { + return false; + } + @Override protected Collection> nodePlugins() { final HashSet> classes = new HashSet<>(super.nodePlugins()); diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index a7db99cc201..66fe95be0be 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -87,12 +86,13 @@ public class TransportMasterNodeActionTests extends ESTestCase { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + transportService = new TransportService(clusterService.getSettings(), transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); - localNode = new DiscoveryNode("local_node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + localNode = new DiscoveryNode("local_node", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT); - remoteNode = new DiscoveryNode("remote_node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + remoteNode = new DiscoveryNode("remote_node", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT); allNodes = new DiscoveryNode[]{localNode, remoteNode}; } diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 67cc64cb871..10e9d9d3f3d 100644 --- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; @@ -107,12 +106,17 @@ public class TransportNodesActionTests extends ESTestCase { public void testNewResponse() { TestTransportNodesAction action = getTestTransportNodesAction(); TestNodesRequest request = new TestNodesRequest(); - List expectedNodeResponses = mockList(TestNodeResponse.class, randomIntBetween(0, 2)); + List expectedNodeResponses = mockList(TestNodeResponse::new, randomIntBetween(0, 2)); expectedNodeResponses.add(new TestNodeResponse()); List nodeResponses = new ArrayList<>(expectedNodeResponses); // This should be ignored: nodeResponses.add(new OtherNodeResponse()); - List failures = mockList(FailedNodeException.class, randomIntBetween(0, 2)); + List failures = mockList( + () -> new FailedNodeException( + randomAsciiOfLength(8), + randomAsciiOfLength(8), + new IllegalStateException(randomAsciiOfLength(8))), + randomIntBetween(0, 2)); List allResponses = new ArrayList<>(expectedNodeResponses); allResponses.addAll(failures); @@ -142,10 +146,10 @@ public class TransportNodesActionTests extends ESTestCase { assertEquals(clusterService.state().nodes().getDataNodes().size(), capturedRequests.size()); } - private List mockList(Class clazz, int size) { + private List mockList(Supplier supplier, int size) { List failures = new ArrayList<>(size); for (int i = 0; i < size; ++i) { - failures.add(mock(clazz)); + failures.add(supplier.get()); } return failures; } @@ -178,7 +182,7 @@ public class TransportNodesActionTests extends ESTestCase { transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); int numNodes = randomIntBetween(3, 10); @@ -237,7 +241,7 @@ public class TransportNodesActionTests extends ESTestCase { private static DiscoveryNode newNode(int nodeId, Map attributes, Set roles) { String node = "node_" + nodeId; - return new DiscoveryNode(node, node, LocalTransportAddress.buildUnique(), attributes, roles, Version.CURRENT); + return new DiscoveryNode(node, node, buildNewFakeTransportAddress(), attributes, roles, Version.CURRENT); } private static class TestTransportNodesAction diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 2d098a065b5..260f70e19ed 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.support.replication; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.ShardOperationFailedException; @@ -25,7 +26,6 @@ import org.elasticsearch.action.UnavailableShardsException; import 
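[Editor's note] The mockList rework in the TransportNodesActionTests hunk above swaps mock(Class) for a Supplier, so the lists are populated with real FailedNodeException and TestNodeResponse instances instead of Mockito proxies. Spelled out with the generics that the collapsed diff drops, the helper amounts to:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Supplier;

    private static <T> List<T> mockList(Supplier<T> supplier, int size) {
        List<T> values = new ArrayList<>(size);
        for (int i = 0; i < size; ++i) {
            // real instances carry state and can be serialized; mocked ones cannot
            values.add(supplier.get());
        }
        return values;
    }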
org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; -import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; @@ -35,7 +35,9 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -45,8 +47,8 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTcpTransport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -89,19 +91,22 @@ public class BroadcastReplicationTests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(Collections.emptyList()), circuitBreakerService); + MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, + threadPool, BigArrays.NON_RECYCLING_INSTANCE, circuitBreakerService, new NamedWriteableRegistry(Collections.emptyList()), + new NetworkService(Settings.EMPTY, Collections.emptyList())); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + transportService = new TransportService(clusterService.getSettings(), transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); - broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), null); + broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), null); } @After public void tearDown() throws Exception { super.tearDown(); - clusterService.close(); - transportService.close(); + IOUtils.close(clusterService, transportService); } @AfterClass @@ -114,7 +119,7 @@ public class BroadcastReplicationTests extends ESTestCase { final String index = "test"; setState(clusterService, state(index, randomBoolean(), randomBoolean() ? 
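[Editor's note] In the BroadcastReplicationTests tearDown above, two sequential close() calls become one IOUtils.close(clusterService, transportService). Lucene's IOUtils.close attempts to close every argument even when an earlier one throws, then rethrows the first failure, so a failing clusterService.close() no longer leaks the transport service:

    import org.apache.lucene.util.IOUtils;

    // before: transportService stays open if clusterService.close() throws
    // clusterService.close();
    // transportService.close();

    // after: both are always closed; the first exception, if any, is rethrown
    IOUtils.close(clusterService, transportService);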
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { if (randomBoolean()) { @@ -133,11 +138,11 @@ public class BroadcastReplicationTests extends ESTestCase { final String index = "test"; setState(clusterService, state(index, randomBoolean(), ShardRoutingState.STARTED)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { ReplicationResponse replicationResponse = new ReplicationResponse(); - replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(1, 1, new ReplicationResponse.ShardInfo.Failure[0])); + replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(1, 1)); shardRequests.v2().onResponse(replicationResponse); } logger.info("total shards: {}, ", response.get().getTotalShards()); @@ -148,7 +153,7 @@ public class BroadcastReplicationTests extends ESTestCase { final String index = "test"; int numShards = 1 + randomInt(3); setState(clusterService, stateWithAssignedPrimariesAndOneReplica(index, numShards)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); int succeeded = 0; int failed = 0; @@ -178,7 +183,7 @@ public class BroadcastReplicationTests extends ESTestCase { public void testNoShards() throws InterruptedException, ExecutionException, IOException { setState(clusterService, stateWithNoShard()); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); BroadcastResponse response = executeAndAssertImmediateResponse(broadcastReplicationAction, new DummyBroadcastRequest()); assertBroadcastResponse(0, 0, 0, response, null); } @@ -188,7 +193,7 @@ public class BroadcastReplicationTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); ClusterState clusterState = state(index, randomBoolean(), randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); List shards = broadcastReplicationAction.shards(new DummyBroadcastRequest().indices(shardId.getIndexName()), clusterState); assertThat(shards.size(), equalTo(1)); assertThat(shards.get(0), equalTo(shardId)); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 646ad23a48b..55485b590cf 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -34,8 +34,8 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.Collections; @@ -144,6 +144,91 @@ public class ClusterStateCreationUtils { return state.build(); } + /** + * Creates cluster state with an index that has #(numberOfPrimaries) primary shards in the started state and no replicas. + * The cluster state contains #(numberOfNodes) nodes and assigns primaries to those nodes. + */ + public static ClusterState state(String index, final int numberOfNodes, final int numberOfPrimaries) { + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set nodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.add(node); + nodes.add(node.getId()); + } + discoBuilder.localNodeId(newNode(0).getId()); + discoBuilder.masterNodeId(randomFrom(nodes)); + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex()); + for (int i = 0; i < numberOfPrimaries; i++) { + ShardId shardId = new ShardId(indexMetaData.getIndex(), i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(shardId, randomFrom(nodes), true, ShardRoutingState.STARTED)); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build()); + } + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + state.routingTable(RoutingTable.builder().add(indexRoutingTable).build()); + return state.build(); + } + + + + /** + * Creates cluster state with the given indices, each index containing #(numberOfPrimaries) + * started primary shards and no replicas. 
The cluster state contains #(numberOfNodes) nodes + * and assigns primaries to those nodes. + */ + public static ClusterState state(final int numberOfNodes, final String[] indices, final int numberOfPrimaries) { + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set nodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.add(node); + nodes.add(node.getId()); + } + discoBuilder.localNodeId(newNode(0).getId()); + discoBuilder.masterNodeId(newNode(0).getId()); + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + for (String index : indices) { + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex()); + for (int i = 0; i < numberOfPrimaries; i++) { + ShardId shardId = new ShardId(indexMetaData.getIndex(), i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(shardId, randomFrom(nodes), true, ShardRoutingState.STARTED)); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build()); + } + + metaData.put(indexMetaData, false); + routingTable.add(indexRoutingTable); + } + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(metaData.generateClusterUuidIfNeeded().build()); + state.routingTable(routingTable.build()); + return state.build(); + } + /** * Creates cluster state with several shards and one replica and all shards STARTED. 
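[Editor's note] The two ClusterStateCreationUtils overloads added above differ only in whether one index or several are laid out; both build #(numberOfPrimaries) started primaries per index, zero replicas, across #(numberOfNodes) nodes. A usage sketch, with the assertions assumed from those documented contracts:

    // single index: 3 nodes, 5 started primaries, no replicas
    ClusterState one = ClusterStateCreationUtils.state("index", 3, 5);
    assert one.routingTable().index("index").shards().size() == 5;

    // several indices with the same per-index shape
    ClusterState many = ClusterStateCreationUtils.state(3, new String[] {"a", "b"}, 2);
    assert many.metaData().hasIndex("a") && many.metaData().hasIndex("b");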
*/ @@ -255,7 +340,7 @@ public class ClusterStateCreationUtils { } private static DiscoveryNode newNode(int nodeId) { - return new DiscoveryNode("node_" + nodeId, LocalTransportAddress.buildUnique(), Collections.emptyMap(), + return new DiscoveryNode("node_" + nodeId, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 3019376bd36..47325bf5f98 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -259,7 +259,7 @@ public class ReplicationOperationTests extends ESTestCase { final ClusterState initialState, final ClusterState changedState) throws Exception { AtomicReference state = new AtomicReference<>(initialState); - logger.debug("--> using initial state:\n{}", state.get().prettyPrint()); + logger.debug("--> using initial state:\n{}", state.get()); final long primaryTerm = initialState.getMetaData().index(shardId.getIndexName()).primaryTerm(shardId.id()); final ShardRouting primaryShard = state.get().routingTable().shardRoutingTable(shardId).primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) { @@ -267,7 +267,7 @@ public class ReplicationOperationTests extends ESTestCase { public Result perform(Request request) throws Exception { Result result = super.perform(request); state.set(changedState); - logger.debug("--> state after primary operation:\n{}", state.get().prettyPrint()); + logger.debug("--> state after primary operation:\n{}", state.get()); return result; } }; @@ -306,8 +306,7 @@ public class ReplicationOperationTests extends ESTestCase { logger.debug("using active shard count of [{}], assigned shards [{}], total shards [{}]." + " expecting op to [{}]. using state: \n{}", request.waitForActiveShards(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, - passesActiveShardCheck ? "succeed" : "retry", - state.prettyPrint()); + passesActiveShardCheck ? "succeed" : "retry", state); final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id()); PlainActionFuture listener = new PlainActionFuture<>(); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java new file mode 100644 index 00000000000..3740f8dd5f7 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Locale; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class ReplicationResponseTests extends ESTestCase { + + public void testShardInfoToString() { + final int total = 5; + final int successful = randomIntBetween(1, total); + final ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful); + assertThat( + shardInfo.toString(), + equalTo(String.format(Locale.ROOT, "ShardInfo{total=5, successful=%d, failures=[]}", successful))); + } + +} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index f98b6c47d93..655244e286f 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -80,7 +80,7 @@ import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; -import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -152,7 +152,7 @@ public class TransportReplicationActionTests extends ESTestCase { transport = new CapturingTransport(); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); @@ -230,7 +230,7 @@ public class TransportReplicationActionTests extends ESTestCase { randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); ReplicationTask task = maybeTask(); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); @@ -249,7 +249,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertTrue(request.isRetrySet.get()); setState(clusterService, state(index, true, ShardRoutingState.STARTED)); - logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> primary assigned state:\n{}", clusterService.state()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); @@ -278,7 +278,7 @@ public class TransportReplicationActionTests extends ESTestCase { String relocationTargetNode = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build(); setState(clusterService, state); - logger.debug("--> relocation ongoing state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> relocation ongoing state:\n{}", clusterService.state()); Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1); PlainActionFuture listener = new PlainActionFuture<>(); @@ -298,10 +298,10 @@ public class TransportReplicationActionTests extends ESTestCase { ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId) .shardsWithState(ShardRoutingState.INITIALIZING).get(0); AllocationService allocationService = ESAllocationTestCase.createAllocationService(); - ClusterState updatedState = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget)); + ClusterState updatedState = allocationService.applyStartedShards(state, Collections.singletonList(relocationTarget)); setState(clusterService, updatedState); - logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> relocation complete state:\n{}", clusterService.state()); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); @@ -318,7 +318,7 @@ public class TransportReplicationActionTests extends ESTestCase { // no replicas in order to skip the replication part setState(clusterService, state(index, true, randomBoolean() ?
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Request request = new Request(new ShardId("unknown_index", "_na_", 0)).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -342,7 +342,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); // no replicas in order to skip the replication part setState(clusterService, stateWithActivePrimary(index, true, randomInt(3))); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Request request = new Request(shardId); boolean timeout = randomBoolean(); if (timeout) { @@ -399,7 +399,7 @@ public class TransportReplicationActionTests extends ESTestCase { ReplicationTask task = maybeTask(); setState(clusterService, stateWithActivePrimary(index, randomBoolean(), 3)); - logger.debug("using state: \n{}", clusterService.state().prettyPrint()); + logger.debug("using state: \n{}", clusterService.state()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); @@ -548,7 +548,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); ClusterState state = stateWithActivePrimary(index, true, 1 + randomInt(3), randomInt(2)); - logger.info("using state: {}", state.prettyPrint()); + logger.info("using state: {}", state); setState(clusterService, state); // check that a request to an unknown node fails @@ -656,7 +656,7 @@ public class TransportReplicationActionTests extends ESTestCase { // we use one replica to check the primary term was set on the operation and sent to the replica setState(clusterService, state(index, true, ShardRoutingState.STARTED, randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED))); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); final ShardRouting routingEntry = clusterService.state().getRoutingTable().index("test").shard(0).primaryShard(); Request request = new Request(shardId); TransportReplicationAction.ConcreteShardRequest concreteShardRequest = @@ -677,15 +677,7 @@ public class TransportReplicationActionTests extends ESTestCase { }; Action action = - new Action(Settings.EMPTY, "testSeqNoIsSetOnPrimary", transportService, clusterService, shardStateAction, threadPool) { - @Override - protected void acquirePrimaryShardReference( - ShardId shardId, - String allocationId, - ActionListener onReferenceAcquired) { - onReferenceAcquired.onResponse(new PrimaryShardReference(shard, releasable)); - } - }; + new Action(Settings.EMPTY, "testSeqNoIsSetOnPrimary", transportService, clusterService, shardStateAction, threadPool); TransportReplicationAction.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler(); @@ -702,7 +694,7 @@ public class TransportReplicationActionTests extends ESTestCase { // no replica, we only want to test on primary final ClusterState state = state(index, true, ShardRoutingState.STARTED); setState(clusterService, state); - logger.debug("-->
using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); final ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard(); Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); @@ -763,7 +755,7 @@ public class TransportReplicationActionTests extends ESTestCase { Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction, threadPool) { @Override - protected ReplicaResult shardOperationOnReplica(Request request) { + protected ReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { assertIndexShardCounter(1); assertPhase(task, "replica"); if (throwException) { @@ -883,7 +875,7 @@ public class TransportReplicationActionTests extends ESTestCase { Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction, threadPool) { @Override - protected ReplicaResult shardOperationOnReplica(Request request) { + protected ReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { assertPhase(task, "replica"); if (throwException.get()) { throw new RetryOnReplicaException(shardId, "simulation"); @@ -925,8 +917,8 @@ public class TransportReplicationActionTests extends ESTestCase { final TransportReplicationAction.ConcreteShardRequest concreteShardRequest = (TransportReplicationAction.ConcreteShardRequest) capturedRequest; assertThat(concreteShardRequest.getRequest(), equalTo(expectedRequest)); + assertThat(((Request)concreteShardRequest.getRequest()).isRetrySet.get(), equalTo(true)); assertThat(concreteShardRequest.getTargetAllocationID(), equalTo(expectedAllocationId.getId())); - } @@ -1015,14 +1007,14 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception { + protected PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard primary) throws Exception { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; return new PrimaryResult(shardRequest, new Response()); } @Override - protected ReplicaResult shardOperationOnReplica(Request request) { + protected ReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { request.processedOnReplicas.incrementAndGet(); return new ReplicaResult(); } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index a554ca53d99..571bbfa72e0 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -28,9 +28,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ 
-62,15 +62,16 @@ public class TransportWriteActionTests extends ESTestCase { noRefreshCall(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond); } - private void noRefreshCall(ThrowingBiFunction action, + private void noRefreshCall(ThrowingTriFunction action, BiConsumer> responder) throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.NONE); // The default, but we'll set it anyway just to be explicit - Result result = action.apply(new TestAction(), request); + Result result = action.apply(new TestAction(), request, indexShard); CapturingActionListener listener = new CapturingActionListener<>(); responder.accept(result, listener); assertNotNull(listener.response); + assertNull(listener.failure); verify(indexShard, never()).refresh(any()); verify(indexShard, never()).addRefreshListener(any(), any()); } @@ -83,15 +84,16 @@ public class TransportWriteActionTests extends ESTestCase { immediateRefresh(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond, r -> {}); } - private void immediateRefresh(ThrowingBiFunction action, + private void immediateRefresh(ThrowingTriFunction action, BiConsumer> responder, Consumer responseChecker) throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - Result result = action.apply(new TestAction(), request); + Result result = action.apply(new TestAction(), request, indexShard); CapturingActionListener listener = new CapturingActionListener<>(); responder.accept(result, listener); assertNotNull(listener.response); + assertNull(listener.failure); responseChecker.accept(listener.response); verify(indexShard).refresh("refresh_flag_index"); verify(indexShard, never()).addRefreshListener(any(), any()); @@ -106,12 +108,12 @@ public class TransportWriteActionTests extends ESTestCase { waitForRefresh(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond, (r, forcedRefresh) -> {}); } - private void waitForRefresh(ThrowingBiFunction action, + private void waitForRefresh(ThrowingTriFunction action, BiConsumer> responder, BiConsumer resultChecker) throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); - Result result = action.apply(new TestAction(), request); + Result result = action.apply(new TestAction(), request, indexShard); CapturingActionListener listener = new CapturingActionListener<>(); responder.accept(result, listener); assertNull(listener.response); // Haven't really responded yet @@ -125,35 +127,75 @@ public class TransportWriteActionTests extends ESTestCase { boolean forcedRefresh = randomBoolean(); refreshListener.getValue().accept(forcedRefresh); assertNotNull(listener.response); + assertNull(listener.failure); resultChecker.accept(listener.response, forcedRefresh); } - private class TestAction extends TransportWriteAction { + public void testDocumentFailureInShardOperationOnPrimary() throws Exception { + TestRequest request = new TestRequest(); + TestAction testAction = new TestAction(true, true); + TransportWriteAction.WritePrimaryResult writePrimaryResult = + testAction.shardOperationOnPrimary(request, indexShard); + CapturingActionListener listener = new CapturingActionListener<>(); + writePrimaryResult.respond(listener); + assertNull(listener.response); + assertNotNull(listener.failure); + } + + public void testDocumentFailureInShardOperationOnReplica() throws Exception { + TestRequest request = new TestRequest(); + TestAction
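[Editor's note] The TransportWriteActionTests rework above gives the write results an explicit failure channel: WritePrimaryResult and WriteReplicaResult now carry either a response/location or an exception, and CapturingActionListener records onFailure instead of rethrowing, so each test can assert that exactly one of response and failure is set. The capture pattern, with the generics restored (ActionListener as in this codebase):

    import org.elasticsearch.action.ActionListener;

    class CapturingActionListener<R> implements ActionListener<R> {
        R response;
        Exception failure;

        @Override
        public void onResponse(R response) {
            this.response = response; // success path: failure stays null
        }

        @Override
        public void onFailure(Exception failure) {
            this.failure = failure;   // failure path: response stays null
        }
    }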
testAction = new TestAction(randomBoolean(), true); + TransportWriteAction.WriteReplicaResult writeReplicaResult = + testAction.shardOperationOnReplica(request, indexShard); + CapturingActionListener listener = new CapturingActionListener<>(); + writeReplicaResult.respond(listener); + assertNull(listener.response); + assertNotNull(listener.failure); + } + + private class TestAction extends TransportWriteAction { + + private final boolean withDocumentFailureOnPrimary; + private final boolean withDocumentFailureOnReplica; + protected TestAction() { - super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), - null, null, null, null, new ActionFilters(new HashSet<>()), - new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME); + this(false, false); } - - @Override - protected IndexShard indexShard(TestRequest request) { - return indexShard; - } - - @Override - protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { - return new WriteResult<>(new TestResponse(), location); - } - - @Override - protected Location onReplicaShard(TestRequest request, IndexShard indexShard) { - return location; + protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentFailureOnReplica) { + super(Settings.EMPTY, "test", + new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null), null, null, null, + null, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, + TestRequest::new, ThreadPool.Names.SAME); + this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; + this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; } @Override protected TestResponse newResponseInstance() { return new TestResponse(); } + + @Override + protected WritePrimaryResult shardOperationOnPrimary(TestRequest request, IndexShard primary) throws Exception { + final WritePrimaryResult primaryResult; + if (withDocumentFailureOnPrimary) { + primaryResult = new WritePrimaryResult(request, null, null, new RuntimeException("simulated"), primary); + } else { + primaryResult = new WritePrimaryResult(request, new TestResponse(), location, null, primary); + } + return primaryResult; + } + + @Override + protected WriteReplicaResult shardOperationOnReplica(TestRequest request, IndexShard replica) throws Exception { + final WriteReplicaResult replicaResult; + if (withDocumentFailureOnReplica) { + replicaResult = new WriteReplicaResult(request, null, new RuntimeException("simulated"), replica); + } else { + replicaResult = new WriteReplicaResult(request, location, null, replica); + } + return replicaResult; + } } private static class TestRequest extends ReplicatedWriteRequest { @@ -173,6 +215,7 @@ public class TransportWriteActionTests extends ESTestCase { private static class CapturingActionListener implements ActionListener { private R response; + private Exception failure; @Override public void onResponse(R response) { @@ -180,12 +223,12 @@ public class TransportWriteActionTests extends ESTestCase { } @Override - public void onFailure(Exception e) { - throw new RuntimeException(e); + public void onFailure(Exception failure) { + this.failure = failure; } } - private interface ThrowingBiFunction { - R apply(A a, B b) throws Exception; + private interface ThrowingTriFunction { + R apply(A a, B b, C c) throws Exception; } } diff --git 
a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 1d736060568..96e90cedf7f 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -137,12 +137,14 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); } + @Override @Before public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); action = new TestTransportInstanceSingleOperationAction( @@ -155,6 +157,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { ); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index cb27a527f63..3b27bbff9ce 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -28,13 +28,25 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.env.Environment; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptContextRegistry; +import org.elasticsearch.script.ScriptEngineRegistry; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.ScriptSettings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.arrayContaining; @@ -53,11 +65,11 @@ public class UpdateRequestTests extends ESTestCase { .endObject()); Script script = request.script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("script1")); + assertThat(script.getIdOrCode(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); Map params = script.getParams(); - assertThat(params, nullValue()); + assertThat(params, equalTo(Collections.emptyMap())); // 
simple verbose script request.fromXContent(XContentFactory.jsonBuilder().startObject() @@ -65,11 +77,11 @@ public class UpdateRequestTests extends ESTestCase { .endObject()); script = request.script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("script1")); + assertThat(script.getIdOrCode(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); - assertThat(params, nullValue()); + assertThat(params, equalTo(Collections.emptyMap())); // script with params request = new UpdateRequest("test", "type", "1"); @@ -82,7 +94,7 @@ public class UpdateRequestTests extends ESTestCase { .endObject().endObject()); script = request.script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("script1")); + assertThat(script.getIdOrCode(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); @@ -96,7 +108,7 @@ public class UpdateRequestTests extends ESTestCase { .field("inline", "script1").endObject().endObject()); script = request.script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("script1")); + assertThat(script.getIdOrCode(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); @@ -121,7 +133,7 @@ public class UpdateRequestTests extends ESTestCase { .endObject().endObject()); script = request.script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("script1")); + assertThat(script.getIdOrCode(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); @@ -148,7 +160,7 @@ public class UpdateRequestTests extends ESTestCase { .endObject().endObject()); script = request.script(); assertThat(script, notNullValue()); - assertThat(script.getScript(), equalTo("script1")); + assertThat(script.getIdOrCode(), equalTo("script1")); assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); @@ -184,9 +196,10 @@ public class UpdateRequestTests extends ESTestCase { .doc(jsonBuilder().startObject().field("fooz", "baz").endObject()) .upsert(indexRequest); + long nowInMillis = randomPositiveLong(); // We simulate that the document is not existing yet GetResult getResult = new GetResult("test", "type1", "1", 0, false, null, null); - UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0),updateRequest, getResult); + UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0),updateRequest, getResult, () -> nowInMillis); Streamable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); IndexRequest indexAction = (IndexRequest) action; @@ -203,7 +216,7 @@ public class UpdateRequestTests extends ESTestCase { // We simulate that the document is not existing yet getResult = new GetResult("test", "type1", "2", 0, false, null, null); - result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult); + result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis); action = result.action(); 
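[Editor's note] Note the new last argument to updateHelper.prepare above: a () -> nowInMillis supplier pins "now" from the test instead of letting UpdateHelper read the wall clock, which is what makes the timestamp assertions below exact. The pattern in isolation (hypothetical class, not from this patch):

    import java.util.function.LongSupplier;

    final class TimestampSource {
        private final LongSupplier nowInMillis;

        TimestampSource(LongSupplier nowInMillis) {
            this.nowInMillis = nowInMillis;
        }

        long timestamp() {
            // production wiring: new TimestampSource(System::currentTimeMillis)
            // test wiring:       new TimestampSource(() -> 42L), asserted verbatim
            return nowInMillis.getAsLong();
        }
    }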
assertThat(action, instanceOf(IndexRequest.class)); indexAction = (IndexRequest) action; @@ -213,12 +226,8 @@ public class UpdateRequestTests extends ESTestCase { // Related to issue #15822 public void testInvalidBodyThrowsParseException() throws Exception { UpdateRequest request = new UpdateRequest("test", "type", "1"); - try { - request.fromXContent(new byte[] { (byte) '"' }); - fail("Should have thrown a ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("Failed to derive xcontent")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> request.fromXContent(new byte[] { (byte) '"' })); + assertThat(e.getMessage(), equalTo("Failed to derive xcontent")); } // Related to issue 15338 @@ -276,4 +285,70 @@ public class UpdateRequestTests extends ESTestCase { assertThat(request.fetchSource().includes()[0], equalTo("path.inner.*")); assertThat(request.fetchSource().excludes()[0], equalTo("another.inner.*")); } + + public void testNowInScript() throws IOException { + Path genericConfigFolder = createTempDir(); + Settings baseSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) + .build(); + Environment environment = new Environment(baseSettings); + Map, Object>> scripts = new HashMap<>(); + scripts.put("ctx._source.update_timestamp = ctx._now", + (vars) -> { + Map ctx = (Map) vars.get("ctx"); + Map source = (Map) ctx.get("_source"); + source.put("update_timestamp", ctx.get("_now")); + return null;}); + scripts.put("ctx._timestamp = ctx._now", + (vars) -> { + Map ctx = (Map) vars.get("ctx"); + ctx.put("_timestamp", ctx.get("_now")); + return null;}); + ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList()); + ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singletonList(new MockScriptEngine("mock", + scripts))); + + ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); + ScriptService scriptService = new ScriptService(baseSettings, environment, + new ResourceWatcherService(baseSettings, null), scriptEngineRegistry, scriptContextRegistry, scriptSettings); + TimeValue providedTTLValue = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl"); + Settings settings = settings(Version.CURRENT).build(); + + UpdateHelper updateHelper = new UpdateHelper(settings, scriptService); + + // We just upsert one document with now() using a script + IndexRequest indexRequest = new IndexRequest("test", "type1", "2") + .source(jsonBuilder().startObject().field("foo", "bar").endObject()) + .ttl(providedTTLValue); + + { + UpdateRequest updateRequest = new UpdateRequest("test", "type1", "2") + .upsert(indexRequest) + .script(new Script(ScriptType.INLINE, "mock", "ctx._source.update_timestamp = ctx._now", Collections.emptyMap())) + .scriptedUpsert(true); + long nowInMillis = randomPositiveLong(); + // We simulate that the document is not existing yet + GetResult getResult = new GetResult("test", "type1", "2", 0, false, null, null); + UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis); + Streamable action = result.action(); + assertThat(action, instanceOf(IndexRequest.class)); + IndexRequest indexAction = (IndexRequest) action; + assertEquals(indexAction.sourceAsMap().get("update_timestamp"), nowInMillis); + } + { + UpdateRequest 
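[Editor's note] The testInvalidBodyThrowsParseException hunk above also trades the try/fail/catch idiom for expectThrows, which fails with a clear message when nothing is thrown and hands back the exception for further assertions. The two shapes side by side (sketch; expectThrows is the ESTestCase helper used in the hunk):

    // before: needs a manual fail() and a catch block scoped to the expected type
    try {
        request.fromXContent(new byte[] { (byte) '"' });
        fail("Should have thrown an ElasticsearchParseException");
    } catch (ElasticsearchParseException e) {
        assertThat(e.getMessage(), equalTo("Failed to derive xcontent"));
    }

    // after: one expression asserts the throw and returns the exception
    Exception e = expectThrows(ElasticsearchParseException.class,
        () -> request.fromXContent(new byte[] { (byte) '"' }));
    assertThat(e.getMessage(), equalTo("Failed to derive xcontent"));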
updateRequest = new UpdateRequest("test", "type1", "2") + .upsert(indexRequest) + .script(new Script(ScriptType.INLINE, "mock", "ctx._timestamp = ctx._now", Collections.emptyMap())) + .scriptedUpsert(true); + long nowInMillis = randomPositiveLong(); + // We simulate that the document is not existing yet + GetResult getResult = new GetResult("test", "type1", "2", 0, true, new BytesArray("{}"), null); + UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis); + Streamable action = result.action(); + assertThat(action, instanceOf(IndexRequest.class)); + IndexRequest indexAction = (IndexRequest) action; + assertEquals(indexAction.timestamp(), Long.toString(nowInMillis)); + } + } } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java index 9813731017d..477a40b435a 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java @@ -28,10 +28,12 @@ import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.ESTestCase; +import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -41,6 +43,7 @@ import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; @@ -55,13 +58,11 @@ public class BootstrapCheckTests extends ESTestCase { // nothing should happen since we are in non-production mode final List transportAddresses = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 8); i++) { - TransportAddress localTransportAddress = mock(TransportAddress.class); - when(localTransportAddress.isLoopbackOrLinkLocalAddress()).thenReturn(true); + TransportAddress localTransportAddress = new TransportAddress(InetAddress.getLoopbackAddress(), i); transportAddresses.add(localTransportAddress); } - TransportAddress publishAddress = mock(TransportAddress.class); - when(publishAddress.isLoopbackOrLinkLocalAddress()).thenReturn(true); + TransportAddress publishAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 0); BoundTransportAddress boundTransportAddress = mock(BoundTransportAddress.class); when(boundTransportAddress.boundAddresses()).thenReturn(transportAddresses.toArray(new TransportAddress[0])); when(boundTransportAddress.publishAddress()).thenReturn(publishAddress); @@ -83,18 +84,17 @@ public class BootstrapCheckTests extends ESTestCase { public void testEnforceLimitsWhenBoundToNonLocalAddress() { final List transportAddresses = new ArrayList<>(); - final TransportAddress nonLocalTransportAddress = mock(TransportAddress.class); - when(nonLocalTransportAddress.isLoopbackOrLinkLocalAddress()).thenReturn(false); + final TransportAddress nonLocalTransportAddress = buildNewFakeTransportAddress(); transportAddresses.add(nonLocalTransportAddress); for (int i = 0; i < randomIntBetween(0, 7); i++) { - 
final TransportAddress randomTransportAddress = mock(TransportAddress.class); - when(randomTransportAddress.isLoopbackOrLinkLocalAddress()).thenReturn(randomBoolean()); + final TransportAddress randomTransportAddress = randomBoolean() ? buildNewFakeTransportAddress() : + new TransportAddress(InetAddress.getLoopbackAddress(), i); transportAddresses.add(randomTransportAddress); } - final TransportAddress publishAddress = mock(TransportAddress.class); - when(publishAddress.isLoopbackOrLinkLocalAddress()).thenReturn(randomBoolean()); + final TransportAddress publishAddress = randomBoolean() ? buildNewFakeTransportAddress() : + new TransportAddress(InetAddress.getLoopbackAddress(), 0); final BoundTransportAddress boundTransportAddress = mock(BoundTransportAddress.class); Collections.shuffle(transportAddresses, random()); @@ -108,14 +108,11 @@ public class BootstrapCheckTests extends ESTestCase { final List transportAddresses = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 8); i++) { - final TransportAddress randomTransportAddress = mock(TransportAddress.class); - when(randomTransportAddress.isLoopbackOrLinkLocalAddress()).thenReturn(false); + final TransportAddress randomTransportAddress = buildNewFakeTransportAddress(); transportAddresses.add(randomTransportAddress); } - final TransportAddress publishAddress = mock(TransportAddress.class); - when(publishAddress.isLoopbackOrLinkLocalAddress()).thenReturn(true); - + final TransportAddress publishAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 0); final BoundTransportAddress boundTransportAddress = mock(BoundTransportAddress.class); when(boundTransportAddress.boundAddresses()).thenReturn(transportAddresses.toArray(new TransportAddress[0])); when(boundTransportAddress.publishAddress()).thenReturn(publishAddress); @@ -535,6 +532,78 @@ public class BootstrapCheckTests extends ESTestCase { consumer.accept(e); } + public void testG1GCCheck() throws NodeValidationException { + final AtomicBoolean isG1GCEnabled = new AtomicBoolean(true); + final AtomicBoolean isJava8 = new AtomicBoolean(true); + final AtomicReference jvmVersion = + new AtomicReference<>(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(0, 39), randomIntBetween(1, 128))); + final BootstrapCheck.G1GCCheck oracleCheck = new BootstrapCheck.G1GCCheck() { + + @Override + String jvmVendor() { + return "Oracle Corporation"; + } + + @Override + boolean isG1GCEnabled() { + return isG1GCEnabled.get(); + } + + @Override + String jvmVersion() { + return jvmVersion.get(); + } + + @Override + boolean isJava8() { + return isJava8.get(); + } + + }; + + final NodeValidationException e = + expectThrows( + NodeValidationException.class, + () -> BootstrapCheck.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck")); + assertThat( + e.getMessage(), + containsString( + "JVM version [" + jvmVersion.get() + "] can cause data corruption when used with G1GC; upgrade to at least Java 8u40")); + + // if G1GC is disabled, nothing should happen + isG1GCEnabled.set(false); + BootstrapCheck.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck"); + + // if on or after update 40, nothing should happen independent of whether or not G1GC is enabled + isG1GCEnabled.set(randomBoolean()); + jvmVersion.set(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(40, 112), randomIntBetween(1, 128))); + BootstrapCheck.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck"); + + final BootstrapCheck.G1GCCheck nonOracleCheck = new 
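[Editor's note] The G1GC bootstrap check exercised above is testable because every environment read (JVM vendor, JVM version, whether G1 is enabled, whether this is Java 8) goes through an overridable method, so tests subclass the check with canned values instead of mocking static JVM state. The seam pattern in miniature (hypothetical check, not the real BootstrapCheck API):

    abstract class EnvironmentCheck {
        // seams: production reads the real JVM, tests override with fixed values
        String jvmVendor() {
            return System.getProperty("java.vm.vendor");
        }

        String jvmVersion() {
            return System.getProperty("java.vm.version");
        }
    }

    // in a test: pin an input without any mocking framework
    EnvironmentCheck oracle = new EnvironmentCheck() {
        @Override
        String jvmVendor() {
            return "Oracle Corporation";
        }
    };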
BootstrapCheck.G1GCCheck() { + + @Override + String jvmVendor() { + return randomAsciiOfLength(8); + } + + }; + + // if not on an Oracle JVM, nothing should happen + BootstrapCheck.check(true, Collections.singletonList(nonOracleCheck), "testG1GCCheck"); + + final BootstrapCheck.G1GCCheck nonJava8Check = new BootstrapCheck.G1GCCheck() { + + @Override + boolean isJava8() { + return false; + } + + }; + + // if not Java 8, nothing should happen + BootstrapCheck.check(true, Collections.singletonList(nonJava8Check), "testG1GCCheck"); + } + public void testAlwaysEnforcedChecks() { final BootstrapCheck.Check check = new BootstrapCheck.Check() { @Override diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index 55d5e65580a..d38d346d6c1 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -111,18 +111,6 @@ public class JarHellTests extends ESTestCase { } } - public void testLog4jThrowableProxyLeniency() throws Exception { - Path dir = createTempDir(); - URL[] jars = {makeJar(dir, "foo.jar", null, "org.apache.logging.log4j.core.impl.ThrowableProxy.class"), makeJar(dir, "bar.jar", null, "org.apache.logging.log4j.core.impl.ThrowableProxy.class")}; - JarHell.checkJarHell(jars); - } - - public void testLog4jServerLeniency() throws Exception { - Path dir = createTempDir(); - URL[] jars = {makeJar(dir, "foo.jar", null, "org.apache.logging.log4j.core.jmx.Server.class"), makeJar(dir, "bar.jar", null, "org.apache.logging.log4j.core.jmx.Server.class")}; - JarHell.checkJarHell(jars); - } - public void testWithinSingleJar() throws Exception { // the java api for zip file does not allow creating duplicate entries (good!) 
so // this bogus jar had to be constructed with ant diff --git a/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index b3862f5af16..d36a6bb9491 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -74,6 +74,7 @@ public class MaxMapCountCheckTests extends ESTestCase { when(reader.readLine()).thenThrow(ioException); final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountIOException"); final MockLogAppender appender = new MockLogAppender(); + appender.start(); appender.addExpectation( new ParameterizedMessageLoggingExpectation( "expected logged I/O exception", @@ -87,6 +88,7 @@ public class MaxMapCountCheckTests extends ESTestCase { appender.assertAllExpectationsMatched(); verify(reader).close(); Loggers.removeAppender(logger, appender); + appender.stop(); } { @@ -94,6 +96,7 @@ public class MaxMapCountCheckTests extends ESTestCase { when(reader.readLine()).thenReturn("eof"); final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountNumberFormatException"); final MockLogAppender appender = new MockLogAppender(); + appender.start(); appender.addExpectation( new ParameterizedMessageLoggingExpectation( "expected logged number format exception", @@ -107,6 +110,7 @@ public class MaxMapCountCheckTests extends ESTestCase { appender.assertAllExpectationsMatched(); verify(reader).close(); Loggers.removeAppender(logger, appender); + appender.stop(); } } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index 75f089b3488..7f169ee1d2c 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -82,8 +82,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -/** - */ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { /** diff --git a/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatIT.java index eb6648cad02..fd78fc147fa 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatIT.java @@ -23,11 +23,11 @@ import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterStateStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESBackcompatTestCase; import org.elasticsearch.transport.MockTransportClient; @@ -50,7 +50,6 @@ public class ClusterStateBackwardsCompatIT extends ESBackcompatTestCase { 
tc.addTransportAddress(n.getNode().getAddress()); ClusterStateResponse response = tc.admin().cluster().prepareState().execute().actionGet(); - assertThat(response.getState().status(), equalTo(ClusterState.ClusterStateStatus.UNKNOWN)); assertNotNull(response.getClusterName()); assertTrue(response.getState().getMetaData().hasIndex("test")); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 8e958457e67..6113f7e5aac 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; +import org.elasticsearch.VersionTests; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.segments.IndexSegments; @@ -82,7 +83,9 @@ import java.util.SortedSet; import java.util.TreeSet; import static org.elasticsearch.test.OldIndexUtils.assertUpgradeWorks; +import static org.elasticsearch.test.OldIndexUtils.getIndexDir; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // needs at least 2 nodes since it bumps replicas to 1 @@ -187,7 +190,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { SortedSet<String> expectedVersions = new TreeSet<>(); for (Version v : VersionUtils.allVersions()) { if (VersionUtils.isSnapshot(v)) continue; // snapshots are unreleased, so there is no backcompat yet - if (v.isAlpha()) continue; // no guarantees for alpha releases + if (v.isRelease() == false) continue; // no guarantees for prereleases if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; // we can only test back one major lucene version if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself expectedVersions.add("index-" + v.toString() + ".zip"); @@ -246,6 +249,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { assertUpgradeWorks(client(), indexName, version); assertDeleteByQueryWorked(indexName, version); assertPositionIncrementGapDefaults(indexName, version); + assertAliasWithBadName(indexName, version); unloadIndex(indexName); } @@ -431,6 +435,31 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { } } + private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); + + public void testUnreleasedVersion() { + VersionTests.assertUnknownVersion(VERSION_5_1_0_UNRELEASED); + } + + /** + * Search on an alias that contains illegal characters that would prevent it from being created after 5.1.0. It should still be + * searchable though. + */ + void assertAliasWithBadName(String indexName, Version version) throws Exception { + if (version.onOrAfter(VERSION_5_1_0_UNRELEASED)) { + return; + } + // We can read from the alias just like we can read from the index. 
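+ // (Hedged aside: from 5.1.0 alias names are assumed to be validated like index names,
+ // which reject characters such as '#'. An alias of the form "#" + indexName can therefore
+ // only exist in an index created by an older version, which is what the version gate
+ // above checks for.)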
+ String aliasName = "#" + indexName; + long totalDocs = client().prepareSearch(indexName).setSize(0).get().getHits().totalHits(); + assertHitCount(client().prepareSearch(aliasName).setSize(0).get(), totalDocs); + assertThat(totalDocs, greaterThanOrEqualTo(2000L)); + + // We can remove the alias. + assertAcked(client().admin().indices().prepareAliases().removeAlias(indexName, aliasName).get()); + assertFalse(client().admin().indices().prepareAliasesExist(aliasName).get().exists()); + } + private Path getNodeDir(String indexFile) throws IOException { Path unzipDir = createTempDir(); Path unzipDataDir = unzipDir.resolve("data"); @@ -448,8 +477,15 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { throw new IllegalStateException("Backwards index must contain exactly one cluster"); } - // the bwc scripts packs the indices under this path - return list[0].resolve("nodes/0/"); + int zipIndex = indexFile.indexOf(".zip"); + final Version version = Version.fromString(indexFile.substring("index-".length(), zipIndex)); + if (version.before(Version.V_5_0_0_alpha1)) { + // the bwc scripts pack the indices under this path + return list[0].resolve("nodes/0/"); + } else { + // after 5.0.0, data folders do not include the cluster name + return list[0].resolve("0"); + } } public void testOldClusterStates() throws Exception { @@ -484,9 +520,19 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); Path nodeDir = getNodeDir(indexFile); logger.info("Parsing cluster state files from index [{}]", indexName); - assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception - Path indexDir = nodeDir.resolve("indices").resolve(indexName); - assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception + final MetaData metaData = globalFormat.loadLatestState(logger, nodeDir); + assertNotNull(metaData); + + final Version version = Version.fromString(indexName.substring("index-".length())); + final Path dataDir; + if (version.before(Version.V_5_0_0_alpha1)) { + dataDir = nodeDir.getParent().getParent(); + } else { + dataDir = nodeDir.getParent(); + } + final Path indexDir = getIndexDir(logger, indexName, indexFile, dataDir); + assertNotNull(indexFormat.loadLatestState(logger, indexDir)); } } + } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java index d7ed0d8db5e..ec8c12cb525 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.bwcompat; +import org.elasticsearch.Version; import org.elasticsearch.common.io.FileTestUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -70,7 +71,12 @@ public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase { final Set<SnapshotInfo> snapshotInfos = Sets.newHashSet(getSnapshots(repoName)); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo originalSnapshot = snapshotInfos.iterator().next(); - assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", "test_1"))); + if (Version.fromString(version).before(Version.V_5_0_0_alpha1)) { + assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", "test_1"))); + } else {
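+ // (Hedged aside: from 5.0.0-alpha1 a SnapshotId is assumed to pair the user-facing
+ // snapshot name with a randomly generated UUID, so equality against a fixed SnapshotId
+ // no longer holds; only the name is stable across runs.)
+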
assertThat(originalSnapshot.snapshotId().getName(), equalTo("test_1")); + assertNotNull(originalSnapshot.snapshotId().getUUID()); // it's a random UUID now + } assertThat(Sets.newHashSet(originalSnapshot.indices()), equalTo(indices)); logger.info("--> restore the original snapshot"); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 494aa7d1095..6ed3d64e46e 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -96,7 +96,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { SortedSet expectedVersions = new TreeSet<>(); for (Version v : VersionUtils.allVersions()) { if (VersionUtils.isSnapshot(v)) continue; // snapshots are unreleased, so there is no backcompat yet - if (v.isAlpha()) continue; // no guarantees for alpha releases + if (v.isRelease() == false) continue; // no guarantees for prereleases if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; // we can only test back one major lucene version if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself expectedVersions.add(v.toString()); diff --git a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java index a82f964c013..ccc72db9d7a 100644 --- a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java +++ b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java @@ -48,9 +48,6 @@ import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public abstract class AbstractClientHeadersTestCase extends ESTestCase { protected static final Settings HEADER_SETTINGS = Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index 04f7b73b1f2..fff3b3cc3af 100644 --- a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -28,16 +28,12 @@ import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.AbstractClientHeadersTestCase; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import java.util.Collections; import java.util.HashMap; -/** - * - */ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase { private static final ActionFilters EMPTY_FILTERS = new ActionFilters(Collections.emptySet()); @@ -47,7 +43,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase { Settings settings = HEADER_SETTINGS; Actions actions = new Actions(settings, threadPool, testedActions); NodeClient client = new NodeClient(settings, threadPool); - client.intialize(actions); + client.initialize(actions); return client; } diff --git a/core/src/test/java/org/elasticsearch/client/node/NodeClientIT.java b/core/src/test/java/org/elasticsearch/client/node/NodeClientIT.java index 073a2bfc0ae..1cae3a15deb 100644 --- a/core/src/test/java/org/elasticsearch/client/node/NodeClientIT.java 
+++ b/core/src/test/java/org/elasticsearch/client/node/NodeClientIT.java @@ -26,9 +26,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import static org.hamcrest.Matchers.is; -/** - * - */ @ClusterScope(scope = Scope.SUITE) public class NodeClientIT extends ESIntegTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 9d2c176dffb..bc771f5721d 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -19,10 +19,7 @@ package org.elasticsearch.client.transport; -import com.carrotsearch.randomizedtesting.generators.RandomInts; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.Lifecycle; @@ -37,7 +34,6 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportServiceAdapter; import java.io.IOException; @@ -83,7 +79,7 @@ abstract class FailAndRetryMockTransport imp //once nodes are connected we'll just return errors for each sendRequest call triedNodes.add(node); - if (RandomInts.randomInt(random, 100) > 10) { + if (random.nextInt(100) > 10) { connectTransportExceptions.incrementAndGet(); throw new ConnectTransportException(node, "node not available"); } else { diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index 02240a6bf24..d25ae28cf28 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; @@ -61,14 +62,20 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { @Override public void tearDown() throws Exception { - super.tearDown(); - transportService.stop(); - transportService.close(); + try { + // stop this first before we bubble up since + // transportService uses the threadpool that super.tearDown will close + transportService.stop(); + transportService.close(); + } finally { + super.tearDown(); + } + } @Override protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) { - transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); 
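+ // (Hedged aside: the expected lifecycle here is create, then start(), then
+ // acceptIncomingRequests(); incoming requests are assumed to be held back until
+ // acceptIncomingRequests() has been called, which is why the client below is only
+ // built afterwards.)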
transportService.start(); transportService.acceptIncomingRequests(); TransportClient client = new MockTransportClient(Settings.builder() @@ -120,12 +127,12 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { private InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor(); @Override - public List getTransportInterceptors() { + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry) { return Collections.singletonList(new TransportInterceptor() { @Override - public TransportRequestHandler interceptHandler(String action, - TransportRequestHandler actualHandler) { - return instance.interceptHandler(action, actualHandler); + public TransportRequestHandler interceptHandler(String action, String executor, + TransportRequestHandler actualHandler) { + return instance.interceptHandler(action, executor, actualHandler); } @Override diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java index 3f4fd501171..dbb066dcb1b 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java @@ -26,15 +26,19 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.discovery.MockZenPing; +import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -54,15 +58,15 @@ public class TransportClientIT extends ESIntegTestCase { public void testNodeVersionIsUpdated() throws IOException, NodeValidationException { TransportClient client = (TransportClient) internalCluster().client(); - try (Node node = new Node(Settings.builder() + try (Node node = new MockNode(Settings.builder() .put(internalCluster().getDefaultSettings()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("node.name", "testNodeVersionIsUpdated") - .put("transport.type", "local") + .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false) .put("cluster.name", "foobar") - .build()).start()) { + .build(), Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) { TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); client.addTransportAddress(transportAddress); // since we force transport clients there has to be one node started that we connect to. 
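For context, the MockNode construction in the hunk above is the general recipe for embedding a test node now that the local transport is being removed throughout these tests. The following is a minimal hedged sketch condensed from that hunk; only calls that appear in the hunk are used, and the surrounding shape (try block, comments) is illustrative rather than part of the patch:

import java.util.Arrays;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.discovery.MockZenPing;
import org.elasticsearch.transport.MockTcpTransportPlugin;
import org.elasticsearch.transport.TransportService;

// A MockNode is handed its plugins explicitly; nothing is discovered from the classpath.
try (Node node = new MockNode(
        Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
                .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
                .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                .build(),
        Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) {
    // The publish address is what an external TransportClient would be pointed at.
    TransportAddress address =
            node.injector().getInstance(TransportService.class).boundAddress().publishAddress();
}

The try-with-resources block mirrors the test: Node is Closeable, so the embedded node is torn down even if an assertion fails.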
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 1596519651f..fcd2d113aa7 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAct import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -39,13 +39,16 @@ import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.startsWith; @@ -60,6 +63,9 @@ public class TransportClientNodesServiceTests extends ESTestCase { private final TransportService transportService; private final TransportClientNodesService transportClientNodesService; private final int nodesCount; + private TransportAddress livenessAddress = buildNewFakeTransportAddress(); + public Set nodeAddresses = new HashSet<>(); + TestIteration() { Settings settings = Settings.builder().put("cluster.name", "test").build(); @@ -91,14 +97,16 @@ public class TransportClientNodesServiceTests extends ESTestCase { } }; } - }); + }, null); transportService.start(); transportService.acceptIncomingRequests(); transportClientNodesService = new TransportClientNodesService(settings, transportService, threadPool); this.nodesCount = randomIntBetween(1, 10); for (int i = 0; i < nodesCount; i++) { - transportClientNodesService.addTransportAddresses(new LocalTransportAddress("node" + i)); + TransportAddress transportAddress = buildNewFakeTransportAddress(); + nodeAddresses.add(transportAddress); + transportClientNodesService.addTransportAddresses(transportAddress); } transport.endConnectMode(); } @@ -118,7 +126,7 @@ public class TransportClientNodesServiceTests extends ESTestCase { LivenessResponse livenessResponse = new LivenessResponse(clusterName, new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), "liveness-hostname" + node.getId(), "liveness-hostaddress" + node.getId(), - new LocalTransportAddress("liveness-address-" + node.getId()), node.getAttributes(), node.getRoles(), + livenessAddress, node.getAttributes(), node.getRoles(), node.getVersion())); handler.handleResponse((T)livenessResponse); } @@ -237,10 +245,8 @@ public class TransportClientNodesServiceTests extends ESTestCase { for (DiscoveryNode discoveryNode : iteration.transportClientNodesService.connectedNodes()) { assertThat(discoveryNode.getHostName(), startsWith("liveness-")); assertThat(discoveryNode.getHostAddress(), 
startsWith("liveness-")); - assertThat(discoveryNode.getAddress(), instanceOf(LocalTransportAddress.class)); - LocalTransportAddress localTransportAddress = (LocalTransportAddress) discoveryNode.getAddress(); - //the original listed transport address is kept rather than the one returned from the liveness api - assertThat(localTransportAddress.id(), startsWith("node")); + assertNotEquals(discoveryNode.getAddress(), iteration.livenessAddress); + assertThat(iteration.nodeAddresses, hasItem(discoveryNode.getAddress())); } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 6326d96f317..939954c4560 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; @@ -320,7 +319,7 @@ public class ClusterChangedEventTests extends ESTestCase { // Create a new DiscoveryNode private static DiscoveryNode newNode(final String nodeId, Set roles) { - return new DiscoveryNode(nodeId, nodeId, nodeId, "host", "host_address", new LocalTransportAddress("_test_" + nodeId), + return new DiscoveryNode(nodeId, nodeId, nodeId, "host", "host_address", buildNewFakeTransportAddress(), Collections.emptyMap(), roles, Version.CURRENT); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 5c710ec92da..3811e37389c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -19,17 +19,25 @@ package org.elasticsearch.cluster; -import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; -import org.elasticsearch.cluster.metadata.IndexTemplateFilter; -import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; +import 
org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.settings.ClusterSettings; @@ -40,9 +48,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugins.ClusterPlugin; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.function.Supplier; @@ -156,4 +167,34 @@ public class ClusterModuleTests extends ModuleTestCase { NullPointerException e = expectThrows(NullPointerException.class, () -> newClusterModuleWithShardsAllocator(settings, "bad", () -> null)); } + + // makes sure that the allocation deciders are set up in the correct order, such that the + // slower allocation deciders come last and we can exit early if there is a NO decision without + // running them. If the order of the deciders is changed for a valid reason, the order should be + // changed in the test too. + public void testAllocationDeciderOrder() { + List<Class<? extends AllocationDecider>> expectedDeciders = Arrays.asList( + MaxRetryAllocationDecider.class, + ReplicaAfterPrimaryActiveAllocationDecider.class, + RebalanceOnlyWhenActiveAllocationDecider.class, + ClusterRebalanceAllocationDecider.class, + ConcurrentRebalanceAllocationDecider.class, + EnableAllocationDecider.class, + NodeVersionAllocationDecider.class, + SnapshotInProgressAllocationDecider.class, + FilterAllocationDecider.class, + SameShardAllocationDecider.class, + DiskThresholdDecider.class, + ThrottlingAllocationDecider.class, + ShardsLimitAllocationDecider.class, + AwarenessAllocationDecider.class); + Collection<AllocationDecider> deciders = ClusterModule.createAllocationDeciders(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), Collections.emptyList()); + Iterator<AllocationDecider> iter = deciders.iterator(); + int idx = 0; + while (iter.hasNext()) { + AllocationDecider decider = iter.next(); + assertSame(decider.getClass(), expectedDeciders.get(idx++)); + } + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index db59e785871..c77d7c10c96 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -46,7 +46,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayService; import 
org.elasticsearch.index.Index; @@ -74,9 +73,9 @@ import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0, numClientNodes = 0) public class ClusterStateDiffIT extends ESIntegTestCase { public void testClusterStateDiffSerialization() throws Exception { - DiscoveryNode masterNode = new DiscoveryNode("master", new LocalTransportAddress("master"), + DiscoveryNode masterNode = new DiscoveryNode("master", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), + DiscoveryNode otherNode = new DiscoveryNode("other", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); @@ -193,14 +192,14 @@ public class ClusterStateDiffIT extends ESIntegTestCase { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); if (randomBoolean()) { - nodes.add(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), + nodes.add(new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), emptySet(), randomVersion(random()))); } } } int additionalNodeCount = randomIntBetween(1, 20); for (int i = 0; i < additionalNodeCount; i++) { - nodes.add(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), + nodes.add(new DiscoveryNode("node-" + randomAsciiOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(), randomVersion(random()))); } return ClusterState.builder(clusterState).nodes(nodes); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index b0cba5bf1de..21c661230eb 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESTestCase; import static java.util.Collections.emptyMap; @@ -33,8 +32,8 @@ public class ClusterStateTests extends ESTestCase { public void testSupersedes() { final Version version = Version.CURRENT; - final DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version); - final DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version); + final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version); + final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version); final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java 
b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index e579b857042..1c69658afdd 100644 --- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; @@ -199,11 +198,11 @@ public class DiskUsageTests extends ESTestCase { new FsInfo.Path("/most", "/dev/sda", 100, 90, 80), }; List nodeStats = Arrays.asList( - new NodeStats(new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), 0, + new NodeStats(new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null,new FsInfo(0, null, node1FSInfo), null,null,null,null,null, null), - new NodeStats(new DiscoveryNode("node_2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), 0, + new NodeStats(new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null,null, null), - new NodeStats(new DiscoveryNode("node_3", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), 0, + new NodeStats(new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null) ); InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages); @@ -240,11 +239,11 @@ public class DiskUsageTests extends ESTestCase { new FsInfo.Path("/least", "/dev/sda", 10, -8, 0), }; List nodeStats = Arrays.asList( - new NodeStats(new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), 0, + new NodeStats(new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null,new FsInfo(0, null, node1FSInfo), null,null,null,null,null, null), - new NodeStats(new DiscoveryNode("node_2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), 0, + new NodeStats(new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null,null, null), - new NodeStats(new DiscoveryNode("node_3", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), 0, + new NodeStats(new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null) ); InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages); diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 2e86cb5b896..fd68e484062 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -63,7 +63,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") public class MinimumMasterNodesIT extends ESIntegTestCase { @@ -74,10 +73,14 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { return classes; } + @Override + protected boolean addMockZenPings() { + return false; + } + public void testSimpleMinimumMasterNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") .put("discovery.initial_state_timeout", "500ms") @@ -129,7 +132,9 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { }); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true)); - assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state + // verify that both nodes are still in the cluster state but there is no master + assertThat(state.nodes().getSize(), equalTo(2)); + assertThat(state.nodes().getMasterNode(), equalTo(null)); logger.info("--> starting the previous master node again..."); internalCluster().startNode(settings); @@ -189,7 +194,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testMultipleNodesShutdownNonMasterNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put("discovery.zen.minimum_master_nodes", 3) .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s") .put("discovery.initial_state_timeout", "500ms") @@ -265,7 +269,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testDynamicUpdateMinimumMasterNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms") .put("discovery.initial_state_timeout", "500ms") .build(); @@ -323,7 +326,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testCanNotBringClusterDown() throws ExecutionException, InterruptedException { int nodeCount = scaledRandomIntBetween(1, 5); Settings.Builder settings = Settings.builder() - .put("discovery.type", "zen") .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") .put("discovery.initial_state_timeout", "500ms"); @@ -362,7 +364,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { public void testCanNotPublishWithoutMinMastNodes() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 84410a92c83..f73043ce4e4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -29,16 +29,19 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import java.util.Collections; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; @@ -46,11 +49,15 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; -/** - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode public class NoMasterNodeIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); + } + public void testNoMasterActions() throws Exception { // note, sometimes, we want to check with the fact that an index gets created, sometimes not... boolean autoCreateIndex = randomBoolean(); @@ -117,12 +124,14 @@ public class NoMasterNodeIT extends ESIntegTestCase { checkWriteAction( false, timeout, client().prepareUpdate("test", "type1", "1") - .setScript(new Script("test script", ScriptService.ScriptType.INLINE, null, null)).setTimeout(timeout)); + .setScript(new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())).setTimeout(timeout)); checkWriteAction( autoCreateIndex, timeout, client().prepareUpdate("no_index", "type1", "1") - .setScript(new Script("test script", ScriptService.ScriptType.INLINE, null, null)).setTimeout(timeout)); + .setScript(new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())).setTimeout(timeout)); checkWriteAction(false, timeout, @@ -214,7 +223,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { ensureSearchable("test1", "test2"); ClusterStateResponse clusterState = client().admin().cluster().prepareState().get(); - logger.info("Cluster state:\n{}", clusterState.getState().prettyPrint()); + logger.info("Cluster state:\n{}", clusterState.getState()); internalCluster().stopRandomDataNode(); assertTrue(awaitBusy(() -> { diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 5bf2bc38c3e..863349e897a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import 
org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.test.ESTestCase; @@ -64,7 +63,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { List nodes = new ArrayList<>(); for (int i = randomIntBetween(20, 50); i > 0; i--) { Set roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); - nodes.add(new DiscoveryNode("node_" + i, "" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), + nodes.add(new DiscoveryNode("node_" + i, "" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, Version.CURRENT)); } return nodes; @@ -85,19 +84,19 @@ public class NodeConnectionsServiceTests extends ESTestCase { ClusterState current = clusterStateFromNodes(Collections.emptyList()); ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); - service.connectToAddedNodes(event); + service.connectToNodes(event.nodesDelta().addedNodes()); assertConnected(event.nodesDelta().addedNodes()); - service.disconnectFromRemovedNodes(event); + service.disconnectFromNodes(event.nodesDelta().removedNodes()); assertConnectedExactlyToNodes(event.state()); current = event.state(); event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); - service.connectToAddedNodes(event); + service.connectToNodes(event.nodesDelta().addedNodes()); assertConnected(event.nodesDelta().addedNodes()); - service.disconnectFromRemovedNodes(event); + service.disconnectFromNodes(event.nodesDelta().removedNodes()); assertConnectedExactlyToNodes(event.state()); } @@ -111,7 +110,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { transport.randomConnectionExceptions = true; - service.connectToAddedNodes(event); + service.connectToNodes(event.nodesDelta().addedNodes()); for (int i = 0; i < 3; i++) { // simulate disconnects @@ -149,7 +148,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); this.transport = new MockTransport(); - transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + transportService = new TransportService(Settings.EMPTY, transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index f411e00468e..a0a00256817 100644 --- a/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -34,9 +34,6 @@ import static org.elasticsearch.client.Requests.createIndexRequest; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.hamcrest.Matchers.equalTo; -/** - * - */ @ClusterScope(scope= Scope.TEST, numDataNodes =0) public class SimpleDataNodesIT extends ESIntegTestCase { public void testDataNodes() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 4773aafbf3f..c033ad7ff27 100644 --- a/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -38,15 +38,11 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode public class SpecificMasterNodesIT extends ESIntegTestCase { - protected final Settings.Builder settingsBuilder() { - return Settings.builder().put("discovery.type", "zen"); - } public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); try { assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); @@ -54,7 +50,7 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { // all is well, no master elected } logger.info("--> start master node"); - final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); @@ -69,14 +65,14 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { } logger.info("--> start master node"); - final String nextMasterEligibleNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } public void testElectOnlyBetweenMasterNodes() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); try { assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); @@ -84,12 
+80,12 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { // all is well, no master elected } logger.info("--> start master node (1)"); - final String masterNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> start master node (2)"); - final String nextMasterEligableNodeName = internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); @@ -106,10 +102,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { */ public void testCustomDefaultMapping() throws Exception { logger.info("--> start master node / non data"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); createIndex("test"); assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("timestamp", "type=date")); @@ -128,10 +124,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { public void testAliasFilterValidation() throws Exception { logger.info("--> start master node / non data"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); logger.info("--> start data node / non master node"); - internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); 
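+ // (Hedged aside: node.master=true with node.data=false starts a dedicated
+ // master-eligible node, and the inverse starts a data-only node; both roles are
+ // assumed to be needed before the index creation and alias operations below can
+ // be acknowledged.)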
diff --git a/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java
index 89f072e2a35..9613128a00b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java
@@ -30,8 +30,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope;
 
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 @ClusterScope(scope= Scope.TEST, numDataNodes =0)
 public class UpdateSettingsValidationIT extends ESIntegTestCase {
     public void testUpdateSettingsValidation() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java
index e042fadca95..ce6704cef82 100644
--- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java
@@ -106,7 +106,8 @@ public class ShardStateActionTests extends ESTestCase {
         super.setUp();
         this.transport = new CapturingTransport();
         clusterService = createClusterService(THREAD_POOL);
-        transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR);
+        transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
         transportService.start();
         transportService.acceptIncomingRequests();
         shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null);
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
index 31e841227b8..d98f9294243 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
@@ -41,8 +41,6 @@ import java.util.concurrent.TimeUnit;
 
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 @ClusterScope(scope= ESIntegTestCase.Scope.TEST, numDataNodes =0, minNumDataNodes = 2)
 public class AwarenessAllocationIT extends ESIntegTestCase {
 
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
index 22d02f51469..9c6a4273a7f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
@@ -65,8 +65,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 
-/**
- */
 @ClusterScope(scope = Scope.TEST, numDataNodes = 0)
 public class ClusterRerouteIT extends ESIntegTestCase {
     private final Logger logger = Loggers.getLogger(ClusterRerouteIT.class);
diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
index d80e16397ac..eb5c88d7e83 100644
--- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
@@ -93,7 +93,7 @@ public class ClusterStateHealthTests extends ESTestCase {
         super.setUp();
         clusterService = createClusterService(threadPool);
         transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool,
-            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
         transportService.start();
         transportService.acceptIncomingRequests();
     }
@@ -277,9 +277,9 @@ public class ClusterStateHealthTests extends ESTestCase {
             // if the inactive primaries are due solely to recovery (not failed allocation or previously being allocated)
             // then cluster health is YELLOW, otherwise RED
             if (primaryInactiveDueToRecovery(indexName, clusterState)) {
-                assertThat("clusterState is:\n" + clusterState.prettyPrint(), health.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+                assertThat("clusterState is:\n" + clusterState, health.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
             } else {
-                assertThat("clusterState is:\n" + clusterState.prettyPrint(), health.getStatus(), equalTo(ClusterHealthStatus.RED));
+                assertThat("clusterState is:\n" + clusterState, health.getStatus(), equalTo(ClusterHealthStatus.RED));
             }
         }
     }
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java
new file mode 100644
index 00000000000..05bd9eeab8c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.InvalidAliasNameException;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.hamcrest.Matchers.startsWith;
+
+public class AliasValidatorTests extends ESTestCase {
+    public void testValidatesAliasNames() {
+        AliasValidator validator = new AliasValidator(Settings.EMPTY);
+        Exception e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone(".", null));
+        assertEquals("Invalid alias name [.]: must not be '.' or '..'", e.getMessage());
+        e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone("..", null));
+        assertEquals("Invalid alias name [..]: must not be '.' or '..'", e.getMessage());
+        e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone("_cat", null));
+        assertEquals("Invalid alias name [_cat]: must not start with '_', '-', or '+'", e.getMessage());
+        e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone("-cat", null));
+        assertEquals("Invalid alias name [-cat]: must not start with '_', '-', or '+'", e.getMessage());
+        e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone("+cat", null));
+        assertEquals("Invalid alias name [+cat]: must not start with '_', '-', or '+'", e.getMessage());
+        e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone("c*t", null));
+        assertThat(e.getMessage(), startsWith("Invalid alias name [c*t]: must not contain the following characters "));
+
+        // Doesn't throw an exception because we allow upper case alias names
+        validator.validateAliasStandalone("CAT", null);
+    }
+}
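The new AliasValidatorTests above, and the DateMathExpressionResolverTests hunks that follow, both lean on the expectThrows helper from ESTestCase. For readers unfamiliar with it, a self-contained sketch of the idiom it replaces, written as it would appear inside an ESTestCase subclass (the parseInt input is illustrative only):

// Old idiom: fail() inside try, assertions in the catch block.
try {
    Integer.parseInt("not-a-number");
    fail("Expected NumberFormatException");
} catch (NumberFormatException e) {
    assertThat(e.getMessage(), containsString("not-a-number"));
}

// New idiom: expectThrows runs the lambda, asserts that the given exception
// type is thrown, and returns the exception for further inspection.
NumberFormatException e = expectThrows(NumberFormatException.class, () -> Integer.parseInt("not-a-number"));
assertThat(e.getMessage(), containsString("not-a-number"));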
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java
index 0215f94d4da..e47ca2184f8 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java
@@ -177,43 +177,31 @@ public class DateMathExpressionResolverTests extends ESTestCase {
     }
 
     public void testExpressionInvalidUnescaped() throws Exception {
-        try {
-            expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>"));
-            fail("Expected ElasticsearchParseException");
-        } catch (ElasticsearchParseException e) {
-            assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-            assertThat(e.getMessage(), containsString("invalid character at position ["));
-        }
+        Exception e = expectThrows(ElasticsearchParseException.class,
+            () -> expressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")));
+        assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+        assertThat(e.getMessage(), containsString("invalid character at position ["));
     }
 
     public void testExpressionInvalidDateMathFormat() throws Exception {
-        try {
-            expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>"));
-            fail("Expected ElasticsearchParseException");
-        } catch (ElasticsearchParseException e) {
-            assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-            assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
-        }
+        Exception e = expectThrows(ElasticsearchParseException.class,
+            () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")));
+        assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+        assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
     }
 
     public void testExpressionInvalidEmptyDateMathFormat() throws Exception {
-        try {
-            expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>"));
-            fail("Expected ElasticsearchParseException");
-        } catch (ElasticsearchParseException e) {
-            assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-            assertThat(e.getMessage(), containsString("missing date format"));
-        }
+        Exception e = expectThrows(ElasticsearchParseException.class,
+            () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")));
+        assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+        assertThat(e.getMessage(), containsString("missing date format"));
     }
 
     public void testExpressionInvalidOpenEnded() throws Exception {
-        try {
-            expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>"));
-            fail("Expected ElasticsearchParseException");
-        } catch (ElasticsearchParseException e) {
-            assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
-            assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
-        }
+        Exception e = expectThrows(ElasticsearchParseException.class,
+            () -> expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")));
+        assertThat(e.getMessage(), containsString("invalid dynamic name expression"));
+        assertThat(e.getMessage(), containsString("date math placeholder is open ended"));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
index 0e0c9fb442b..b68f3735c0a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
@@ -44,8 +44,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
 
-/**
- */
 public class IndexNameExpressionResolverTests extends ESTestCase {
     private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
 
@@ -307,7 +305,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
         assertEquals(1, results.length);
         assertEquals("bar", results[0]);
 
-        results = indexNameExpressionResolver.concreteIndexNames(context, "-foo*");
+        results = indexNameExpressionResolver.concreteIndexNames(context, "*", "-foo*");
         assertEquals(1, results.length);
         assertEquals("bar", results[0]);
 
@@ -587,6 +585,64 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
         assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
     }
 
+    public void testConcreteIndicesWildcardWithNegation() {
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX").state(State.OPEN))
+                .put(indexBuilder("testXXY").state(State.OPEN))
+                .put(indexBuilder("testXYY").state(State.OPEN))
+                .put(indexBuilder("-testXYZ").state(State.OPEN))
+                .put(indexBuilder("-testXZZ").state(State.OPEN))
+                .put(indexBuilder("-testYYY").state(State.OPEN))
+                .put(indexBuilder("testYYY").state(State.OPEN))
+                .put(indexBuilder("testYYX").state(State.OPEN));
+        ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
+
+        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state,
+            IndicesOptions.fromOptions(true, true, true, true));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")),
+            equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testX*")),
+            equalTo(newHashSet("testYYY", "testYYX")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testX*")),
+            equalTo(newHashSet("-testXYZ", "-testXZZ")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXY", "-testX*")),
+            equalTo(newHashSet("testXXY", "-testXYZ", "-testXZZ")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "*", "--testX*")),
+            equalTo(newHashSet("testXXX", "testXXY", "testXYY", "testYYX", "testYYY", "-testYYY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testXXX", "test*")),
+            equalTo(newHashSet("testYYX", "testXXX", "testXYY", "testYYY", "testXXY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "test*", "-testXXX")),
+            equalTo(newHashSet("testYYX", "testXYY", "testYYY", "testXXY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "+testXXX", "+testXXY", "+testYYY", "-testYYY")),
+            equalTo(newHashSet("testXXX", "testXXY", "testYYY", "-testYYY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testYYY", "testYYX", "testX*", "-testXXX")),
+            equalTo(newHashSet("testYYY", "testYYX", "testXXY", "testXYY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "-testXXX", "*testY*", "-testYYY")),
+            equalTo(newHashSet("testYYX", "testYYY", "-testYYY")));
+
+        String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "-doesnotexist");
+        assertEquals(0, indexNames.length);
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "-*")),
+            equalTo(newHashSet("-testXYZ", "-testXZZ", "-testYYY")));
+
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(),
+            "+testXXX", "+testXXY", "+testXYY", "-testXXY")),
+            equalTo(newHashSet("testXXX", "testXYY", "testXXY")));
+
+        indexNames = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "*", "-*");
+        assertEquals(0, indexNames.length);
+    }
+
     /**
      * test resolving _all pattern (null, empty array or "_all") for random IndicesOptions
     */
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
index d74b450f5bf..2b8d86ac132 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
@@ -34,7 +34,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.InvalidIndexNameException;
@@ -181,7 +180,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
     }
 
     private DiscoveryNode newNode(String nodeId) {
-        return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), emptyMap(),
+        return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(),
             Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA))), Version.CURRENT);
     }
 
@@ -207,17 +206,4 @@
                 .getDefault(Settings.EMPTY)).build()));
         assertThat(e.getMessage(), endsWith(errorMessage));
     }
-
-    private MetaDataCreateIndexService getCreateIndexService() {
-        return new MetaDataCreateIndexService(
-            Settings.EMPTY,
-            null,
-            null,
-            null,
-            null,
-            null,
-            null,
-            null,
-            null);
-    }
 }
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
index 03f62830ba7..8a342057dab 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
@@ -33,7 +33,7 @@ import java.util.Collection;
 
 import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.contains;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyCollectionOf;
+import static org.mockito.Matchers.anySetOf;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -41,11 +41,11 @@ public class MetaDataIndexAliasesServiceTests extends ESTestCase {
     private final AliasValidator aliasValidator = new AliasValidator(Settings.EMPTY);
     private final MetaDataDeleteIndexService deleteIndexService = mock(MetaDataDeleteIndexService.class);
     private final MetaDataIndexAliasesService service = new MetaDataIndexAliasesService(Settings.EMPTY, null, null, aliasValidator,
-            null, deleteIndexService);
+            deleteIndexService);
 
     public MetaDataIndexAliasesServiceTests() {
         // Mock any deletes so we don't need to worry about how MetaDataDeleteIndexService does its job
-        when(deleteIndexService.deleteIndices(any(ClusterState.class), anyCollectionOf(Index.class))).then(i -> {
+        when(deleteIndexService.deleteIndices(any(ClusterState.class), anySetOf(Index.class))).then(i -> {
             ClusterState state = (ClusterState) i.getArguments()[0];
             @SuppressWarnings("unchecked")
             Collection<Index> indices = (Collection<Index>) i.getArguments()[1];
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
index 8ad86818614..3d3da02822a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
@@ -32,9 +32,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- *
- */
 public class ToAndFromJsonMetaDataTests extends ESTestCase {
     public void testSimpleJsonFromAndTo() throws IOException {
         MetaData metaData = MetaData.builder()
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
index 01110e796e8..bac9a681341 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
@@ -50,9 +50,9 @@ public class WildcardExpressionResolverTests extends ESTestCase {
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY")));
-        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), equalTo(newHashSet("testXXX", "-testXXX")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY")));
-        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), equalTo(newHashSet("testXXX")));
     }
 
     public void testConvertWildcardsTests() {
@@ -66,7 +66,7 @@ public class WildcardExpressionResolverTests extends ESTestCase {
         IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
-        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("-kuku"))), equalTo(newHashSet("-kuku")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testX*", "+testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testYYY", "+testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
index 59f058a95fb..1f226427237 100644
--- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
@@ -21,8 +21,7 @@ package org.elasticsearch.cluster.node;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
-import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -42,15 +41,13 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
 import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 public class DiscoveryNodeFiltersTests extends ESTestCase {
 
-    private static InetSocketTransportAddress localAddress;
+    private static TransportAddress localAddress;
 
     @BeforeClass
     public static void createLocalAddress() throws UnknownHostException {
-        localAddress = new InetSocketTransportAddress(InetAddress.getByName("192.1.1.54"), 9999);
+        localAddress = new TransportAddress(InetAddress.getByName("192.1.1.54"), 9999);
     }
 
     @AfterClass
@@ -64,11 +61,11 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
             .build();
         DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
 
-        DiscoveryNode node = new DiscoveryNode("name1", "id1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),
+        DiscoveryNode node = new DiscoveryNode("name1", "id1", buildNewFakeTransportAddress(), emptyMap(), emptySet(),
             Version.CURRENT);
         assertThat(filters.match(node), equalTo(true));
 
-        node = new DiscoveryNode("name2", "id2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+        node = new DiscoveryNode("name2", "id2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         assertThat(filters.match(node), equalTo(false));
     }
 
@@ -78,11 +75,11 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
             .build();
         DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
 
-        DiscoveryNode node = new DiscoveryNode("name1", "id1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),
+        DiscoveryNode node = new DiscoveryNode("name1", "id1", buildNewFakeTransportAddress(), emptyMap(), emptySet(),
             Version.CURRENT);
         assertThat(filters.match(node), equalTo(true));
 
-        node = new DiscoveryNode("name2", "id2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+        node = new DiscoveryNode("name2", "id2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         assertThat(filters.match(node), equalTo(false));
     }
 
@@ -94,13 +91,13 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
         DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
 
         final Version version = Version.CURRENT;
-        DiscoveryNode node = new DiscoveryNode("name1", "id1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version);
+        DiscoveryNode node = new DiscoveryNode("name1", "id1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version);
         assertThat(filters.match(node), equalTo(true));
 
-        node = new DiscoveryNode("name2", "id2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version);
+        node = new DiscoveryNode("name2", "id2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version);
         assertThat(filters.match(node), equalTo(true));
 
-        node = new DiscoveryNode("name3", "id3", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version);
+        node = new DiscoveryNode("name3", "id3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version);
         assertThat(filters.match(node), equalTo(false));
     }
 
@@ -114,7 +111,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
         Map<String, String> attributes = new HashMap<>();
         attributes.put("tag", "A");
         attributes.put("group", "B");
-        DiscoveryNode node = new DiscoveryNode("name1", "id1", LocalTransportAddress.buildUnique(),
+        DiscoveryNode node = new DiscoveryNode("name1", "id1", buildNewFakeTransportAddress(),
                 attributes, emptySet(), Version.CURRENT);
         assertThat(filters.match(node), equalTo(true));
 
@@ -122,7 +119,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
         attributes.put("tag", "A");
         attributes.put("group", "B");
         attributes.put("name", "X");
-        node = new DiscoveryNode("name2", "id2", LocalTransportAddress.buildUnique(),
+        node = new DiscoveryNode("name2", "id2", buildNewFakeTransportAddress(),
                 attributes, emptySet(), Version.CURRENT);
         assertThat(filters.match(node), equalTo(true));
 
@@ -130,11 +127,11 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
         attributes.put("tag", "A");
         attributes.put("group", "F");
         attributes.put("name", "X");
-        node = new DiscoveryNode("name3", "id3", LocalTransportAddress.buildUnique(),
+        node = new DiscoveryNode("name3", "id3", buildNewFakeTransportAddress(),
                 attributes, emptySet(), Version.CURRENT);
         assertThat(filters.match(node), equalTo(false));
 
-        node = new DiscoveryNode("name4", "id4", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+        node = new DiscoveryNode("name4", "id4", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         assertThat(filters.match(node), equalTo(false));
     }
 
@@ -144,7 +141,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
             .build();
         DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
 
-        DiscoveryNode node = new DiscoveryNode("name1", "id1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),
+        DiscoveryNode node = new DiscoveryNode("name1", "id1", buildNewFakeTransportAddress(), emptyMap(), emptySet(),
             Version.CURRENT);
         assertThat(filters.match(node), equalTo(true));
     }
diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java
index d6a83108d0f..342919fb881 100644
--- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.node;
 
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.elasticsearch.Version;
-import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.ArrayList;
@@ -136,8 +135,8 @@ public class DiscoveryNodesTests extends ESTestCase {
         final DiscoveryNodes discoNodesA = builderA.build();
         final DiscoveryNodes discoNodesB = builderB.build();
-        logger.info("nodes A: {}", discoNodesA.prettyPrint());
-        logger.info("nodes B: {}", discoNodesB.prettyPrint());
+        logger.info("nodes A: {}", discoNodesA);
+        logger.info("nodes B: {}", discoNodesB);
 
         DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA);
 
@@ -194,7 +193,7 @@ public class DiscoveryNodesTests extends ESTestCase {
     }
 
     private static DiscoveryNode newNode(int nodeId, Map<String, String> attributes, Set<DiscoveryNode.Role> roles) {
-        return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, LocalTransportAddress.buildUnique(), attributes, roles,
+        return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles,
             Version.CURRENT);
     }
 
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java
index 036c168eee8..173caaab379 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java
@@ -34,8 +34,6 @@ import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class AllocationIdTests extends ESTestCase {
     public void testShardToStarted() {
         logger.info("-- create unassigned shard");
@@ -122,7 +120,7 @@ public class AllocationIdTests extends ESTestCase {
         shard = shard.reinitializePrimaryShard();
         assertThat(shard.allocationId().getId(), notNullValue());
         assertThat(shard.allocationId().getRelocationId(), nullValue());
-        assertThat(shard.allocationId().getId(), not(equalTo(allocationId.getId())));
+        assertThat(shard.allocationId().getId(), equalTo(allocationId.getId()));
     }
 
     public void testSerialization() throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
index 88f09a55c2e..e4321218983 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
@@ -34,8 +34,6 @@ import java.util.List;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class DelayedAllocationIT extends ESIntegTestCase {
 
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java
index a700358384d..a41ecdec79e 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java
@@ -57,8 +57,6 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
 
-/**
- */
 public class DelayedAllocationServiceTests extends ESAllocationTestCase {
 
     private TestDelayAllocationService delayedAllocationService;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
index 8bc9c29bb37..0d284a1e47e 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
 import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
@@ -38,7 +37,6 @@ import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.disruption.NetworkDisruption;
 import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect;
 import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransportService;
 
 import java.util.Arrays;
@@ -55,7 +53,6 @@ import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
-@ESIntegTestCase.SuppressLocalMode
 public class PrimaryAllocationIT extends ESIntegTestCase {
 
     @Override
@@ -64,6 +61,11 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         return Arrays.asList(MockTransportService.TestPlugin.class);
     }
 
+    @Override
+    protected boolean addMockZenPings() {
+        return false;
+    }
+
     private void createStaleReplicaScenario() throws Exception {
         logger.info("--> starting 3 nodes, 1 master, 2 data");
         String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
@@ -115,12 +117,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         logger.info("--> check that old primary shard does not get promoted to primary again");
         // kick reroute and wait for all shard states to be fetched
         client(master).admin().cluster().prepareReroute().get();
-        assertBusy(new Runnable() {
-            @Override
-            public void run() {
-                assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0));
-            }
-        });
+        assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0)));
         // kick reroute a second time and check that all shards are unassigned
         assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
     }
@@ -155,7 +152,9 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)).get();
 
         logger.info("--> wait until shard is failed and becomes unassigned again");
-        assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
+        assertBusy(() ->
+            assertTrue(client().admin().cluster().prepareState().get().getState().toString(),
+                client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()));
         assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
     }
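The PrimaryAllocationIT hunk above collapses an anonymous Runnable into a lambda passed to assertBusy. Roughly, assertBusy retries an assertion until it passes or a timeout elapses; a simplified sketch of that loop under stated assumptions (the real ESTestCase helper uses an increasing backoff and a 10-second default timeout, and this is not its actual implementation):

// Simplified sketch of the assertBusy retry loop.
static void assertBusySketch(Runnable assertion, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    long sleepMillis = 1;
    while (true) {
        try {
            assertion.run();
            return; // assertion passed
        } catch (AssertionError e) {
            if (System.currentTimeMillis() >= deadline) {
                throw e; // timed out: surface the last failure
            }
        }
        Thread.sleep(sleepMillis);
        sleepMillis = Math.min(sleepMillis * 2, 500); // back off, capped
    }
}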
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java
index 04277ba1eb4..211071c9555 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java
@@ -27,8 +27,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 public class RoutingServiceTests extends ESAllocationTestCase {
 
     private TestRoutingService routingService;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
index 5eff8a0a53d..4fffcebc79b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
@@ -53,8 +53,6 @@ import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class UnassignedInfoTests extends ESAllocationTestCase {
     public void testReasonOrdinalOrder() {
         UnassignedInfo.Reason[] order = new UnassignedInfo.Reason[]{
@@ -308,7 +306,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
         clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
         // make sure both replicas are marked as delayed (i.e. not reallocated)
         clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
-        assertThat(clusterState.prettyPrint(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2));
+        assertThat(clusterState.toString(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2));
     }
 
     public void testFindNextDelayedAllocation() {
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
index ed7a944963d..e658ff03a18 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -89,7 +89,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
         clusterState = addNodes(clusterState, service, 1, nodeOffset++);
         assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
         assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
-        logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+        logger.debug("ClusterState: {}", clusterState.getRoutingNodes());
     }
 
     public void testMinimalRelocations() {
@@ -150,7 +150,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
         newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
         assertThat(newState, equalTo(clusterState));
         assertNumIndexShardsPerNode(clusterState, equalTo(2));
-        logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+        logger.debug("ClusterState: {}", clusterState.getRoutingNodes());
     }
 
     public void testMinimalRelocationsNoLimit() {
@@ -212,7 +212,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
         newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
         assertThat(newState, equalTo(clusterState));
         assertNumIndexShardsPerNode(clusterState, equalTo(2));
-        logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+        logger.debug("ClusterState: {}", clusterState.getRoutingNodes());
     }
 
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
index 7dfa49455b8..5cae5d3d928 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -64,8 +64,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class AllocationCommandsTests extends ESAllocationTestCase {
     private final Logger logger = Loggers.getLogger(AllocationCommandsTests.class);
 
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
index e7eacf94f9d..fec0a33b917 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
@@ -45,8 +45,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.sameInstance;
 
-/**
- */
 public class AwarenessAllocationTests extends ESAllocationTestCase {
 
     private final Logger logger = Loggers.getLogger(AwarenessAllocationTests.class);
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
new file mode 100644
index 00000000000..806e136bba3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ESAllocationTestCase;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.NodeRebalanceDecision;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.RebalanceDecision;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.startsWith;
+
+/**
+ * Tests for balancing a single shard, see {@link Balancer#decideRebalance(ShardRouting)}.
+ */
+public class BalancedSingleShardTests extends ESAllocationTestCase {
+
+    public void testRebalanceNonStartedShardNotAllowed() {
+        BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY);
+        ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(),
+            randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.UNASSIGNED, ShardRoutingState.RELOCATING));
+        ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
+        RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, newRoutingAllocation(
+            new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState));
+        assertSame(RebalanceDecision.NOT_TAKEN, rebalanceDecision);
+    }
+
+    public void testRebalanceNotAllowedDuringPendingAsyncFetch() {
+        BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY);
+        ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED);
+        ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
+        RoutingAllocation routingAllocation = newRoutingAllocation(
+            new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState);
+        routingAllocation.setHasPendingAsyncFetch();
+        RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
+        assertNotNull(rebalanceDecision.getCanRebalanceDecision());
+        assertEquals(Type.NO, rebalanceDecision.getFinalDecisionType());
+        assertThat(rebalanceDecision.getFinalExplanation(), startsWith("cannot rebalance due to in-flight shard store fetches"));
+        assertNull(rebalanceDecision.getNodeDecisions());
+        assertNull(rebalanceDecision.getAssignedNodeId());
+
+        assertAssignedNodeRemainsSame(allocator, routingAllocation, shard);
+    }
+
+    public void testRebalancingNotAllowedDueToCanRebalance() {
+        final Decision canRebalanceDecision = randomFrom(Decision.NO, Decision.THROTTLE);
+        AllocationDecider noRebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+                return allocation.decision(canRebalanceDecision, "TEST", "foobar");
+            }
+        };
+        BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY);
+        ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED);
+        ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
+        RoutingAllocation routingAllocation = newRoutingAllocation(
+            new AllocationDeciders(Settings.EMPTY, Collections.singleton(noRebalanceDecider)), clusterState);
+        RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
+        assertEquals(canRebalanceDecision.type(), rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(canRebalanceDecision.type(), rebalanceDecision.getFinalDecisionType());
+        assertEquals("rebalancing is not allowed", rebalanceDecision.getFinalExplanation());
+        assertNotNull(rebalanceDecision.getNodeDecisions());
+        assertNull(rebalanceDecision.getAssignedNodeId());
+
+        assertEquals(1, rebalanceDecision.getCanRebalanceDecision().getDecisions().size());
+        for (Decision subDecision : rebalanceDecision.getCanRebalanceDecision().getDecisions()) {
+            assertEquals("foobar", ((Decision.Single) subDecision).getExplanation());
+        }
+
+        assertAssignedNodeRemainsSame(allocator, routingAllocation, shard);
+    }
+
+    public void testRebalancePossible() {
+        AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                return Decision.YES;
+            }
+        };
+        Tuple<ClusterState, RebalanceDecision> rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, true);
+        ClusterState clusterState = rebalance.v1();
+        RebalanceDecision rebalanceDecision = rebalance.v2();
+        assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(Type.YES, rebalanceDecision.getFinalDecisionType());
+        assertNotNull(rebalanceDecision.getFinalExplanation());
+        assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
+        assertNotNull(rebalanceDecision.getAssignedNodeId());
+    }
+
+    public void testRebalancingNotAllowedDueToCanAllocate() {
+        AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                return Decision.NO;
+            }
+        };
+        Tuple<ClusterState, RebalanceDecision> rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, false);
+        ClusterState clusterState = rebalance.v1();
+        RebalanceDecision rebalanceDecision = rebalance.v2();
+        assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(Type.NO, rebalanceDecision.getFinalDecisionType());
+        assertThat(rebalanceDecision.getFinalExplanation(),
+            startsWith("cannot rebalance shard, no other node exists that would form a more balanced"));
+        assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
+        assertNull(rebalanceDecision.getAssignedNodeId());
+    }
+
+    public void testDontBalanceShardWhenThresholdNotMet() {
+        AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                return Decision.YES;
+            }
+        };
+        // ridiculously high threshold setting so we won't rebalance
+        Settings balancerSettings = Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 1000f).build();
+        Tuple<ClusterState, RebalanceDecision> rebalance = setupStateAndRebalance(canAllocateDecider, balancerSettings, false);
+        ClusterState clusterState = rebalance.v1();
+        RebalanceDecision rebalanceDecision = rebalance.v2();
+        assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type());
+        assertEquals(Type.NO, rebalanceDecision.getFinalDecisionType());
+        assertNotNull(rebalanceDecision.getFinalExplanation());
+        assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size());
+        assertNull(rebalanceDecision.getAssignedNodeId());
+    }
+
+    public void testSingleShardBalanceProducesSameResultsAsBalanceStep() {
+        final String[] indices = { "idx1", "idx2" };
+        // Create a cluster state with 2 indices, each with 1 started primary shard, and only
+        // one node initially so that all primary shards get allocated to the same node. We are only
+        // using 2 indices (i.e. 2 total primary shards) because if we have any more than 2 started shards
+        // in the routing table, then we have no guarantees about the order in which the 3 or more shards
+        // are selected to be rebalanced to the new node, and hence the node to which they are rebalanced
+        // is not deterministic. Using only two shards guarantees that only one of those two shards will
+        // be rebalanced, and so we pick the one that was chosen to be rebalanced and execute the single-shard
+        // rebalance step on it to make sure it gets assigned to the same node.
+        ClusterState clusterState = ClusterStateCreationUtils.state(1, indices, 1);
+        // add new nodes so one of the primaries can be rebalanced
+        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+        int numAddedNodes = randomIntBetween(1, 5);
+        // randomly select a subset of the newly added nodes to set filter allocation on (but not all)
+        int excludeNodesSize = randomIntBetween(0, numAddedNodes - 1);
+        final Set<String> excludeNodes = new HashSet<>();
+        for (int i = 0; i < numAddedNodes; i++) {
+            DiscoveryNode discoveryNode = newNode(randomAsciiOfLength(7));
+            nodesBuilder.add(discoveryNode);
+            if (i < excludeNodesSize) {
+                excludeNodes.add(discoveryNode.getId());
+            }
+        }
+        clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+
+        AllocationDecider allocationDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                if (excludeNodes.contains(node.nodeId())) {
+                    return Decision.NO;
+                }
+                return Decision.YES;
+            }
+        };
+        AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+                return Decision.YES;
+            }
+        };
+        List<AllocationDecider> allocationDeciders = Arrays.asList(rebalanceDecider, allocationDecider);
+        RoutingAllocation routingAllocation = newRoutingAllocation(
+            new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
+        // allocate and get the node that is now relocating
+        BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY);
+        allocator.allocate(routingAllocation);
+        ShardRouting shardToRebalance = null;
+        for (RoutingNode routingNode : routingAllocation.routingNodes()) {
+            List<ShardRouting> relocatingShards = routingNode.shardsWithState(ShardRoutingState.RELOCATING);
+            if (relocatingShards.size() > 0) {
+                shardToRebalance = randomFrom(relocatingShards);
+                break;
+            }
+        }
+
+        routingAllocation = newRoutingAllocation(new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
+        routingAllocation.debugDecision(true);
+        ShardRouting shard = clusterState.getRoutingNodes().activePrimary(shardToRebalance.shardId());
+        RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
+        assertEquals(shardToRebalance.relocatingNodeId(), rebalanceDecision.getAssignedNodeId());
+        // make sure all excluded nodes returned a NO decision
+        for (String excludedNode : excludeNodes) {
+            NodeRebalanceDecision nodeRebalanceDecision = rebalanceDecision.getNodeDecisions().get(excludedNode);
+            assertEquals(Type.NO, nodeRebalanceDecision.getCanAllocateDecision().type());
+        }
+    }
+
+    private Tuple<ClusterState, RebalanceDecision> setupStateAndRebalance(AllocationDecider allocationDecider,
+                                                                          Settings balancerSettings,
+                                                                          boolean rebalanceExpected) {
+        AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+            @Override
+            public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+                return Decision.YES;
+            }
+        };
+        List<AllocationDecider> allocationDeciders = Arrays.asList(rebalanceDecider, allocationDecider);
+        final int numShards = randomIntBetween(8, 13);
+        BalancedShardsAllocator allocator = new BalancedShardsAllocator(balancerSettings);
+        ClusterState clusterState = ClusterStateCreationUtils.state("idx", 2, numShards);
+        // add a new node so shards can be rebalanced there
+        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+        nodesBuilder.add(newNode(randomAsciiOfLength(7)));
+        clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+        ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
+        RoutingAllocation routingAllocation = newRoutingAllocation(
+            new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
+        RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation);
+
+        if (rebalanceExpected == false) {
+            assertAssignedNodeRemainsSame(allocator, routingAllocation, shard);
+        }
+
+        return Tuple.tuple(clusterState, rebalanceDecision);
+    }
+
+    private RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) {
+        RoutingAllocation allocation = new RoutingAllocation(
+            deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false
+        );
+        allocation.debugDecision(true);
+        return allocation;
+    }
+
+    private void assertAssignedNodeRemainsSame(BalancedShardsAllocator allocator, RoutingAllocation routingAllocation,
+                                               ShardRouting originalRouting) {
+        allocator.allocate(routingAllocation);
+        RoutingNodes routingNodes = routingAllocation.routingNodes();
+        // make sure the previous node id is the same as the current one after rerouting
+        assertEquals(originalRouting.currentNodeId(), routingNodes.activePrimary(originalRouting.shardId()).currentNodeId());
+    }
+}
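BalancedSingleShardTests above asserts on sentinel instances such as RebalanceDecision.NOT_TAKEN, and the MoveDecisionTests added later in this patch pin the same idea for MoveDecision.stay: when explain mode is off, the factory returns a shared cached instance rather than allocating a fresh decision per shard. A minimal sketch of that caching pattern, with illustrative class and field names that are not part of the patch:

final class StayDecision {
    private static final StayDecision CACHED = new StayDecision(null);

    private final String explanation;

    private StayDecision(String explanation) {
        this.explanation = explanation;
    }

    // In explain mode each call builds a fresh instance carrying an explanation;
    // otherwise the shared, explanation-free instance is reused to avoid allocation.
    static StayDecision stay(boolean explain) {
        return explain ? new StayDecision("shard can remain on its current node") : CACHED;
    }

    String getExplanation() {
        return explanation;
    }
}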
873c71f19b5..f73f97b61a3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -41,8 +41,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -/** - */ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(ExpectedShardSizeAllocationTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 667ae850bfa..6063faba156 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -52,9 +52,6 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class FailedShardsRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(FailedShardsRoutingTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java index 430809e6726..79473759f8f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java @@ -41,8 +41,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.equalTo; -/** - */ public class FilterRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(FilterRoutingTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index 986d08843ad..d6e54b6e3b3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -39,9 +39,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class IndexBalanceTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(IndexBalanceTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java new file mode 100644 index 00000000000..783fe690365 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MoveDecisionTests.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision.WeightedDecision; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.MoveDecision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +/** + * Unit tests for the {@link MoveDecision} class. + */ +public class MoveDecisionTests extends ESTestCase { + + public void testCachedDecisions() { + // cached stay decision + MoveDecision stay1 = MoveDecision.stay(Decision.YES, false); + MoveDecision stay2 = MoveDecision.stay(Decision.YES, false); + assertSame(stay1, stay2); // not in explain mode, so should use cached decision + stay1 = MoveDecision.stay(Decision.YES, true); + stay2 = MoveDecision.stay(Decision.YES, true); + assertNotSame(stay1, stay2); + + // cached cannot move decision + stay1 = MoveDecision.decision(Decision.NO, Type.NO, false, null, null, null); + stay2 = MoveDecision.decision(Decision.NO, Type.NO, false, null, null, null); + assertSame(stay1, stay2); + // final decision is YES, so shouldn't use cached decision + stay1 = MoveDecision.decision(Decision.NO, Type.YES, false, null, "node1", null); + stay2 = MoveDecision.decision(Decision.NO, Type.YES, false, null, "node1", null); + assertNotSame(stay1, stay2); + assertEquals(stay1.getAssignedNodeId(), stay2.getAssignedNodeId()); + // final decision is NO, but in explain mode, so shouldn't use cached decision + stay1 = MoveDecision.decision(Decision.NO, Type.NO, true, "node1", null, null); + stay2 = MoveDecision.decision(Decision.NO, Type.NO, true, "node1", null, null); + assertNotSame(stay1, stay2); + assertSame(stay1.getFinalDecisionType(), stay2.getFinalDecisionType()); + assertNotNull(stay1.getFinalExplanation()); + assertEquals(stay1.getFinalExplanation(), stay2.getFinalExplanation()); + } + + public void testStayDecision() { + MoveDecision stay = MoveDecision.stay(Decision.YES, true); + assertFalse(stay.cannotRemain()); + assertFalse(stay.move()); + assertTrue(stay.isDecisionTaken()); + assertNull(stay.getNodeDecisions()); + assertNotNull(stay.getFinalExplanation()); + assertEquals(Type.NO, stay.getFinalDecisionType()); + + stay = MoveDecision.stay(Decision.YES, false); + assertFalse(stay.cannotRemain()); + assertFalse(stay.move()); + assertTrue(stay.isDecisionTaken()); + assertNull(stay.getNodeDecisions()); + assertNull(stay.getFinalExplanation()); + assertEquals(Type.NO, stay.getFinalDecisionType()); + } + + public void testDecisionWithExplain() { + Map<String, WeightedDecision> nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", new WeightedDecision(randomFrom(Decision.NO, Decision.THROTTLE, Decision.YES), randomFloat())); + nodeDecisions.put("node2", new
WeightedDecision(randomFrom(Decision.NO, Decision.THROTTLE, Decision.YES), randomFloat())); + MoveDecision decision = MoveDecision.decision(Decision.NO, Type.NO, true, "node1", null, nodeDecisions); + assertNotNull(decision.getFinalDecisionType()); + assertNotNull(decision.getFinalExplanation()); + assertNotNull(decision.getNodeDecisions()); + assertEquals(2, decision.getNodeDecisions().size()); + + decision = MoveDecision.decision(Decision.NO, Type.YES, true, "node1", "node2", null); + assertEquals("node2", decision.getAssignedNodeId()); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 49bcb28e86c..077466906b7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -47,7 +47,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryA import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.Snapshot; @@ -293,11 +292,11 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNodes() { ShardId shard1 = new ShardId("test1", "_na_", 0); ShardId shard2 = new ShardId("test2", "_na_", 0); - final DiscoveryNode newNode = new DiscoveryNode("newNode", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode newNode = new DiscoveryNode("newNode", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT); - final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion()); - final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion()); AllocationId allocationId1P = AllocationId.newInitializing(); AllocationId allocationId1R = AllocationId.newInitializing(); @@ -336,11 +335,11 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { - final DiscoveryNode newNode = new DiscoveryNode("newNode", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode newNode = new DiscoveryNode("newNode", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT); - final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion()); - final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", 
buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion()); int numberOfShards = randomIntBetween(1, 3); @@ -372,7 +371,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } private ClusterState stabilize(ClusterState clusterState, AllocationService service) { - logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); clusterState = service.deassociateDeadNodes(clusterState, true, "reroute"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); @@ -381,7 +380,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { logger.info("complete rebalancing"); boolean changed; do { - logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); changed = newState.equals(clusterState) == false; clusterState = newState; @@ -392,7 +391,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } private void assertRecoveryNodeVersions(RoutingNodes routingNodes) { - logger.trace("RoutingNodes: {}", routingNodes.prettyPrint()); + logger.trace("RoutingNodes: {}", routingNodes); List<ShardRouting> mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.RELOCATING); for (ShardRouting r : mutableShardRoutings) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java index 7e528e601d3..cf26df90002 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java @@ -35,8 +35,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; -/** - */ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocationTestCase { public void testPreferLocalPrimaryAllocationOverFiltered() { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index 1c209157b13..d4e032f4761 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -34,8 +34,6 @@ import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.Matchers.equalTo; -/** - */ public class PreferPrimaryAllocationTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index d789e6c4ec6..a634d32d71d 100644 ---
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -37,9 +37,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index f2673805fa1..e5725ed61ef 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -35,9 +35,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 6722e048030..23992b91541 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -159,7 +159,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { } while (clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0 && iterations < 200); - logger.info("Done Balancing after [{}] iterations. State:\n{}", iterations, clusterState.prettyPrint()); + logger.info("Done Balancing after [{}] iterations. 
State:\n{}", iterations, clusterState); // we stop after 200 iterations if it didn't stabelize by then something is likely to be wrong assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index ff2020d6844..ea8cbe19b7f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -43,9 +43,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class RebalanceAfterActiveTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(RebalanceAfterActiveTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index cf9db4ec542..ab64d0131ec 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -38,9 +38,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index 6c837ed2b20..9401cc1ca6f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -39,9 +39,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -/** - * - */ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(IndexBalanceTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java index 74106e91dbc..794b8220511 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java @@ -22,8 +22,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRoutingState; -/** - */ public class RoutingNodesUtils { public static int numberOfShardsOfType(RoutingNodes nodes, ShardRoutingState state) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index 66fe40793d5..ae87fe5332e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -34,15 +34,12 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import static java.util.Collections.emptyMap; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType; import static org.hamcrest.Matchers.equalTo; -/** - */ public class SameShardRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(SameShardRoutingTests.class); @@ -63,9 +60,9 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes with the same host"); clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder() - .add(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT)) - .add(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); clusterState = strategy.reroute(clusterState, "reroute"); @@ -79,7 +76,7 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> add another node, with a different host, replicas will be allocating"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .add(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); clusterState = strategy.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecisionTests.java new file mode 100644 index 00000000000..d8e4570c04b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecisionTests.java @@ -0,0 +1,146 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision.WeightedDecision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Unit tests for the {@link ShardAllocationDecision} class. + */ +public class ShardAllocationDecisionTests extends ESTestCase { + + public void testDecisionNotTaken() { + ShardAllocationDecision shardAllocationDecision = ShardAllocationDecision.DECISION_NOT_TAKEN; + assertFalse(shardAllocationDecision.isDecisionTaken()); + assertNull(shardAllocationDecision.getFinalDecisionType()); + assertNull(shardAllocationDecision.getAllocationStatus()); + assertNull(shardAllocationDecision.getAllocationId()); + assertNull(shardAllocationDecision.getAssignedNodeId()); + assertNull(shardAllocationDecision.getFinalExplanation()); + assertNull(shardAllocationDecision.getNodeDecisions()); + expectThrows(IllegalArgumentException.class, () -> shardAllocationDecision.getFinalDecisionSafe()); + } + + public void testNoDecision() { + final AllocationStatus allocationStatus = randomFrom( + AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA + ); + ShardAllocationDecision noDecision = ShardAllocationDecision.no(allocationStatus, "something is wrong"); + assertTrue(noDecision.isDecisionTaken()); + assertEquals(Decision.Type.NO, noDecision.getFinalDecisionType()); + assertEquals(allocationStatus, noDecision.getAllocationStatus()); + assertEquals("something is wrong", noDecision.getFinalExplanation()); + assertNull(noDecision.getNodeDecisions()); + assertNull(noDecision.getAssignedNodeId()); + assertNull(noDecision.getAllocationId()); + + Map<String, WeightedDecision> nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", new ShardAllocationDecision.WeightedDecision(Decision.NO)); + nodeDecisions.put("node2", new ShardAllocationDecision.WeightedDecision(Decision.NO)); + noDecision = ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO, "something is wrong", + nodeDecisions.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getDecision())) + ); + assertTrue(noDecision.isDecisionTaken()); + assertEquals(Decision.Type.NO, noDecision.getFinalDecisionType()); + assertEquals(AllocationStatus.DECIDERS_NO, noDecision.getAllocationStatus()); + assertEquals("something is wrong", noDecision.getFinalExplanation()); + assertEquals(nodeDecisions, noDecision.getNodeDecisions()); + assertNull(noDecision.getAssignedNodeId()); + assertNull(noDecision.getAllocationId()); + + // test bad values + expectThrows(NullPointerException.class, () -> ShardAllocationDecision.no((AllocationStatus)null, "a")); + } + + public void testThrottleDecision() { + Map<String, WeightedDecision> nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", new
ShardAllocationDecision.WeightedDecision(Decision.NO)); + nodeDecisions.put("node2", new ShardAllocationDecision.WeightedDecision(Decision.THROTTLE)); + ShardAllocationDecision throttleDecision = ShardAllocationDecision.throttle("too much happening", + nodeDecisions.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getDecision())) + ); + assertTrue(throttleDecision.isDecisionTaken()); + assertEquals(Decision.Type.THROTTLE, throttleDecision.getFinalDecisionType()); + assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus()); + assertEquals("too much happening", throttleDecision.getFinalExplanation()); + assertEquals(nodeDecisions, throttleDecision.getNodeDecisions()); + assertNull(throttleDecision.getAssignedNodeId()); + assertNull(throttleDecision.getAllocationId()); + } + + public void testYesDecision() { + Map<String, WeightedDecision> nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", new ShardAllocationDecision.WeightedDecision(Decision.YES)); + nodeDecisions.put("node2", new ShardAllocationDecision.WeightedDecision(Decision.NO)); + String allocId = randomBoolean() ? "allocId" : null; + ShardAllocationDecision yesDecision = ShardAllocationDecision.yes( + "node1", "node was very kind", allocId, nodeDecisions.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getDecision()) + ) + ); + assertTrue(yesDecision.isDecisionTaken()); + assertEquals(Decision.Type.YES, yesDecision.getFinalDecisionType()); + assertNull(yesDecision.getAllocationStatus()); + assertEquals("node was very kind", yesDecision.getFinalExplanation()); + assertEquals(nodeDecisions, yesDecision.getNodeDecisions()); + assertEquals("node1", yesDecision.getAssignedNodeId()); + assertEquals(allocId, yesDecision.getAllocationId()); + } + + public void testCachedDecisions() { + List<AllocationStatus> cacheableStatuses = Arrays.asList(AllocationStatus.DECIDERS_NO, AllocationStatus.DECIDERS_THROTTLED, + AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA, AllocationStatus.DELAYED_ALLOCATION); + for (AllocationStatus allocationStatus : cacheableStatuses) { + if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) { + ShardAllocationDecision cached = ShardAllocationDecision.throttle(null, null); + ShardAllocationDecision another = ShardAllocationDecision.throttle(null, null); + assertSame(cached, another); + ShardAllocationDecision notCached = ShardAllocationDecision.throttle("abc", null); + another = ShardAllocationDecision.throttle("abc", null); + assertNotSame(notCached, another); + } else { + ShardAllocationDecision cached = ShardAllocationDecision.no(allocationStatus, null); + ShardAllocationDecision another = ShardAllocationDecision.no(allocationStatus, null); + assertSame(cached, another); + ShardAllocationDecision notCached = ShardAllocationDecision.no(allocationStatus, "abc"); + another = ShardAllocationDecision.no(allocationStatus, "abc"); + assertNotSame(notCached, another); + } + } + + // yes decisions are not precomputed and cached + Map<String, Decision> dummyMap = Collections.emptyMap(); + ShardAllocationDecision first = ShardAllocationDecision.yes("node1", "abc", "alloc1", dummyMap); + ShardAllocationDecision second = ShardAllocationDecision.yes("node1", "abc", "alloc1", dummyMap); + // same fields for the ShardAllocationDecision, but should be different instances + assertNotSame(first, second); + } + +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 85948f3c52c..7530e34cb83 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -40,8 +40,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType; import static org.hamcrest.Matchers.equalTo; -/** - */ public class ShardsLimitAllocationTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(ShardsLimitAllocationTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index dd89d6b6a52..8bd4b39d076 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -52,9 +52,6 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index 0990850acae..44c8d5ac4d3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -38,9 +38,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 454e8410484..74d3dda8e36 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -70,15 +70,14 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> test starting of shard"); ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(initShard)); - assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable().prettyPrint(), - newState, not(equalTo(state))); - assertTrue(initShard + "isn't started \ncurrent routing table:" + newState.routingTable().prettyPrint(), + assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); + assertTrue(initShard + " isn't started \ncurrent routing table:" + newState.routingTable(),
newState.routingTable().index("test").shard(initShard.id()).allShardsStarted()); state = newState; logger.info("--> testing starting of relocating shards"); newState = allocation.applyStartedShards(state, Arrays.asList(relocatingShard.getTargetRelocatingShard())); - assertThat("failed to start " + relocatingShard + "\ncurrent routing table:" + newState.routingTable().prettyPrint(), + assertThat("failed to start " + relocatingShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); ShardRouting shardRouting = newState.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0); assertThat(shardRouting.state(), equalTo(ShardRoutingState.STARTED)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index 5907232b5f2..0239ee6235e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -41,9 +41,6 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 894b5b42f0c..5cafe410d56 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -55,9 +55,6 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class ThrottlingAllocationTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(ThrottlingAllocationTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java deleted file mode 100644 index 412cc3322f2..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.routing.allocation; - -import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.test.ESTestCase; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * Unit tests for the {@link UnassignedShardDecision} class. - */ -public class UnassignedShardDecisionTests extends ESTestCase { - - public void testDecisionNotTaken() { - UnassignedShardDecision unassignedShardDecision = UnassignedShardDecision.DECISION_NOT_TAKEN; - assertFalse(unassignedShardDecision.isDecisionTaken()); - assertNull(unassignedShardDecision.getFinalDecision()); - assertNull(unassignedShardDecision.getAllocationStatus()); - assertNull(unassignedShardDecision.getAllocationId()); - assertNull(unassignedShardDecision.getAssignedNodeId()); - assertNull(unassignedShardDecision.getFinalExplanation()); - assertNull(unassignedShardDecision.getNodeDecisions()); - expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalDecisionSafe()); - expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalExplanationSafe()); - } - - public void testNoDecision() { - final AllocationStatus allocationStatus = randomFrom( - AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA - ); - UnassignedShardDecision noDecision = UnassignedShardDecision.noDecision(allocationStatus, "something is wrong"); - assertTrue(noDecision.isDecisionTaken()); - assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type()); - assertEquals(allocationStatus, noDecision.getAllocationStatus()); - assertEquals("something is wrong", noDecision.getFinalExplanation()); - assertNull(noDecision.getNodeDecisions()); - assertNull(noDecision.getAssignedNodeId()); - assertNull(noDecision.getAllocationId()); - - Map<String, Decision> nodeDecisions = new HashMap<>(); - nodeDecisions.put("node1", Decision.NO); - nodeDecisions.put("node2", Decision.NO); - noDecision = UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, "something is wrong", nodeDecisions); - assertTrue(noDecision.isDecisionTaken()); - assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type()); - assertEquals(AllocationStatus.DECIDERS_NO, noDecision.getAllocationStatus()); - assertEquals("something is wrong", noDecision.getFinalExplanation()); - assertEquals(nodeDecisions, noDecision.getNodeDecisions()); - assertNull(noDecision.getAssignedNodeId()); - assertNull(noDecision.getAllocationId()); - - // test bad values - expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(null, "a")); - expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, null)); - } - - public void testThrottleDecision() { - Map<String, Decision> nodeDecisions = new HashMap<>(); - nodeDecisions.put("node1", Decision.NO); - nodeDecisions.put("node2", Decision.THROTTLE); - UnassignedShardDecision throttleDecision = UnassignedShardDecision.throttleDecision("too much happening", nodeDecisions); - assertTrue(throttleDecision.isDecisionTaken()); - assertEquals(Decision.Type.THROTTLE, throttleDecision.getFinalDecision().type()); - assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus()); - assertEquals("too much happening", throttleDecision.getFinalExplanation()); - assertEquals(nodeDecisions, throttleDecision.getNodeDecisions()); -
assertNull(throttleDecision.getAssignedNodeId()); - assertNull(throttleDecision.getAllocationId()); - - // test bad values - expectThrows(NullPointerException.class, () -> UnassignedShardDecision.throttleDecision(null, Collections.emptyMap())); - } - - public void testYesDecision() { - Map<String, Decision> nodeDecisions = new HashMap<>(); - nodeDecisions.put("node1", Decision.YES); - nodeDecisions.put("node2", Decision.NO); - String allocId = randomBoolean() ? "allocId" : null; - UnassignedShardDecision yesDecision = UnassignedShardDecision.yesDecision( - "node was very kind", "node1", allocId, nodeDecisions - ); - assertTrue(yesDecision.isDecisionTaken()); - assertEquals(Decision.Type.YES, yesDecision.getFinalDecision().type()); - assertNull(yesDecision.getAllocationStatus()); - assertEquals("node was very kind", yesDecision.getFinalExplanation()); - assertEquals(nodeDecisions, yesDecision.getNodeDecisions()); - assertEquals("node1", yesDecision.getAssignedNodeId()); - assertEquals(allocId, yesDecision.getAllocationId()); - - expectThrows(NullPointerException.class, - () -> UnassignedShardDecision.yesDecision(null, "a", randomBoolean() ? "a" : null, Collections.emptyMap())); - expectThrows(NullPointerException.class, - () -> UnassignedShardDecision.yesDecision("a", null, null, Collections.emptyMap())); - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index e711354b181..167172ec9bd 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -39,9 +39,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DecisionTests.java new file mode 100644 index 00000000000..3774992643d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DecisionTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.Type.NO; +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.Type.THROTTLE; +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.Type.YES; + +/** + * A class for unit testing the {@link Decision} class. + */ +public class DecisionTests extends ESTestCase { + + /** + * Tests {@link Type#higherThan(Type)} + */ + public void testHigherThan() { + // test YES type + assertTrue(YES.higherThan(NO)); + assertTrue(YES.higherThan(THROTTLE)); + assertFalse(YES.higherThan(YES)); + + // test THROTTLE type + assertTrue(THROTTLE.higherThan(NO)); + assertFalse(THROTTLE.higherThan(THROTTLE)); + assertFalse(THROTTLE.higherThan(YES)); + + // test NO type + assertFalse(NO.higherThan(NO)); + assertFalse(NO.higherThan(THROTTLE)); + assertFalse(NO.higherThan(YES)); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 062a018a82d..c80cc9a26b9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -49,7 +49,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; @@ -823,9 +822,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .addAsNew(metaData.index("foo")) .build(); - DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", new LocalTransportAddress("1"), emptyMap(), + DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT); - DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", new LocalTransportAddress("2"), emptyMap(), + DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); @@ -942,9 +941,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .build(); logger.info("--> adding one master node, one data node"); - DiscoveryNode discoveryNode1 = new DiscoveryNode("", "node1", new LocalTransportAddress("1"), emptyMap(), + DiscoveryNode discoveryNode1 = new DiscoveryNode("", "node1", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNode.Role.MASTER), Version.CURRENT); - DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", new LocalTransportAddress("2"), emptyMap(), + DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); @@ -1010,7 +1009,7 @@ public class 
DiskThresholdDeciderTests extends ESAllocationTestCase { // Add another datanode, it should relocate. logger.info("--> adding node3"); - DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", new LocalTransportAddress("3"), emptyMap(), + DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT); ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .add(discoveryNode3)).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 5e8f3415273..d3e9259994c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -70,9 +69,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { final Index index = metaData.index("test").getIndex(); ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - DiscoveryNode node_0 = new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); - DiscoveryNode node_1 = new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); RoutingTable routingTable = RoutingTable.builder() @@ -108,9 +107,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss); ImmutableOpenMap.Builder shardRoutingMap = ImmutableOpenMap.builder(); - DiscoveryNode node_0 = new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); - DiscoveryNode node_1 = new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); MetaData metaData = MetaData.builder() @@ -224,7 +223,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { assertEquals(100L, DiskThresholdDecider.getExpectedShardSize(test_1, allocation, 0)); assertEquals(10L, 
DiskThresholdDecider.getExpectedShardSize(test_0, allocation, 0)); - RoutingNode node = new RoutingNode("node1", new DiscoveryNode("node1", new LocalTransportAddress("test"), + RoutingNode node = new RoutingNode("node1", new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), test_0, test_1.getTargetRelocatingShard(), test_2); assertEquals(100L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, false, "/dev/null")); assertEquals(90L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/null")); @@ -242,7 +241,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { other_0 = ShardRoutingHelper.moveToStarted(other_0); other_0 = ShardRoutingHelper.relocate(other_0, "node1"); - node = new RoutingNode("node1", new DiscoveryNode("node1", new LocalTransportAddress("test"), + node = new RoutingNode("node1", new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), test_0, test_1.getTargetRelocatingShard(), test_2, other_0.getTargetRelocatingShard()); if (other_0.primary()) { assertEquals(10100L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, false, "/dev/null")); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 4230504f538..5be51ceb3ae 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -47,8 +47,6 @@ import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocat import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.hamcrest.Matchers.equalTo; -/** - */ public class EnableAllocationTests extends ESAllocationTestCase { private final Logger logger = Loggers.getLogger(EnableAllocationTests.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 3259598f694..00d9a8ff096 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster.serialization; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -30,14 +31,11 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.cluster.ESAllocationTestCase; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class ClusterSerializationTests extends ESAllocationTestCase { + public void testClusterStateSerialization() throws Exception { MetaData metaData = MetaData.builder() 
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1)) @@ -58,7 +56,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { assertThat(serializedClusterState.getClusterName().value(), equalTo(clusterState.getClusterName().value())); - assertThat(serializedClusterState.routingTable().prettyPrint(), equalTo(clusterState.routingTable().prettyPrint())); + assertThat(serializedClusterState.routingTable().toString(), equalTo(clusterState.routingTable().toString())); } public void testRoutingTableSerialization() throws Exception { @@ -83,7 +81,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { StreamInput inStream = outStream.bytes().streamInput(); RoutingTable target = RoutingTable.Builder.readFrom(inStream); - assertThat(target.prettyPrint(), equalTo(source.prettyPrint())); + assertThat(target.toString(), equalTo(source.toString())); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java index 7c11e2b8c23..9ce3d1fcee8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.serialization; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -28,17 +29,13 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.cluster.ESAllocationTestCase; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; -/** - * - */ public class ClusterStateToStringTests extends ESAllocationTestCase { public void testClusterStateSerialization() throws Exception { MetaData metaData = MetaData.builder() @@ -50,7 +47,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase { .addAsNew(metaData.index("test_idx")) .build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().add(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(), + DiscoveryNodes nodes = DiscoveryNodes.builder().add(new DiscoveryNode("node_foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes) @@ -59,7 +56,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(); clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build(); - String clusterStateString = clusterState.toString(); + String clusterStateString = 
Strings.toString(clusterState, true); assertNotNull(clusterStateString); assertThat(clusterStateString, containsString("test_idx")); diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index b8527872d70..3d345f24dbe 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -60,7 +60,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode public class ClusterServiceIT extends ESIntegTestCase { @Override @@ -69,10 +68,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTask() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -145,10 +141,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTaskSameClusterState() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -216,10 +209,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTaskNoAckExpected() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -288,10 +278,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } public void testAckedUpdateTaskTimeoutZero() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); + internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); final AtomicBoolean allNodesAcked = new AtomicBoolean(false); @@ -365,11 +352,8 @@ public class ClusterServiceIT extends ESIntegTestCase { @TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace") public void testPendingUpdateTask() throws Exception { - Settings settings = Settings.builder() - .put("discovery.type", "local") - .build(); - String node_0 = internalCluster().startNode(settings); - internalCluster().startCoordinatingOnlyNode(settings); + String node_0 = internalCluster().startNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0); final CountDownLatch block1 = new CountDownLatch(1); @@ -501,7 +485,6 @@ public class ClusterServiceIT extends ESIntegTestCase { public void testLocalNodeMasterListenerCallbacks() throws Exception { Settings settings = Settings.builder() - .put("discovery.type", "zen") .put("discovery.zen.minimum_master_nodes", 1) .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms") 
.put("discovery.initial_state_timeout", "500ms") @@ -552,7 +535,7 @@ public class ClusterServiceIT extends ESIntegTestCase { // there should not be any master as the minimum number of required eligible masters is not met awaitBusy(() -> clusterService1.state().nodes().getMasterNode() == null && - clusterService1.state().status() == ClusterState.ClusterStateStatus.APPLIED); + clusterService1.clusterServiceState().getClusterStateStatus() == ClusterStateStatus.APPLIED); assertThat(testService1.master(), is(false)); // bring the node back up diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index af5dc422e66..1ea6853ee7c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.service; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -41,8 +40,9 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -54,6 +54,8 @@ import org.junit.Before; import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -115,16 +117,16 @@ public class ClusterServiceTests extends ESTestCase { TimedClusterService timedClusterService = new TimedClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); - timedClusterService.setLocalNode(new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), + timedClusterService.setLocalNode(new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)); timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToAddedNodes(ClusterChangedEvent event) { + public void connectToNodes(List addedNodes) { // skip } @Override - public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + public void disconnectFromNodes(List removedNodes) { // skip } }); @@ -301,6 +303,66 @@ public class ClusterServiceTests extends ESTestCase { assertTrue(published.get()); } + public void testOneExecutorDontStarveAnother() throws InterruptedException { + final List executionOrder = Collections.synchronizedList(new ArrayList<>()); + final Semaphore allowProcessing = new Semaphore(0); + final Semaphore startedProcessing = new Semaphore(0); + + class TaskExecutor implements ClusterStateTaskExecutor { + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + executionOrder.addAll(tasks); // do this 
first, so startedProcessing can be used as a notification that this is done. + startedProcessing.release(tasks.size()); + allowProcessing.acquire(tasks.size()); + return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + } + + TaskExecutor executorA = new TaskExecutor(); + TaskExecutor executorB = new TaskExecutor(); + + final ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.NORMAL); + final ClusterStateTaskListener noopListener = (source, e) -> { throw new AssertionError(source, e); }; + // this blocks the cluster state queue, so we can set it up right + clusterService.submitStateUpdateTask("0", "A0", config, executorA, noopListener); + // wait to be processed + startedProcessing.acquire(1); + assertThat(executionOrder, equalTo(Arrays.asList("A0"))); + + + // these will be the first batch + clusterService.submitStateUpdateTask("1", "A1", config, executorA, noopListener); + clusterService.submitStateUpdateTask("2", "A2", config, executorA, noopListener); + + // release the first 0 task, but not the second + allowProcessing.release(1); + startedProcessing.acquire(2); + assertThat(executionOrder, equalTo(Arrays.asList("A0", "A1", "A2"))); + + // setup the queue with pending tasks for another executor same priority + clusterService.submitStateUpdateTask("3", "B3", config, executorB, noopListener); + clusterService.submitStateUpdateTask("4", "B4", config, executorB, noopListener); + + + clusterService.submitStateUpdateTask("5", "A5", config, executorA, noopListener); + clusterService.submitStateUpdateTask("6", "A6", config, executorA, noopListener); + + // now release the processing + allowProcessing.release(6); + + // wait for last task to be processed + startedProcessing.acquire(4); + + assertThat(executionOrder, equalTo(Arrays.asList("A0", "A1", "A2", "B3", "B4", "A5", "A6"))); + + } + // test that for a single thread, tasks are executed in the order // that they are submitted public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException { @@ -680,6 +742,7 @@ public class ClusterServiceTests extends ESTestCase { @TestLogging("org.elasticsearch.cluster.service:TRACE") // To ensure that we log cluster state events on TRACE level public void testClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", @@ -776,6 +839,7 @@ public class ClusterServiceTests extends ESTestCase { latch.await(); } finally { Loggers.removeAppender(clusterLogger, mockAppender); + mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); } @@ -783,6 +847,7 @@ public class ClusterServiceTests extends ESTestCase { @TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level public void testLongClusterStateUpdateLogging() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test1 shouldn't see because setting is too low", @@ -906,10 +971,75 @@ public class ClusterServiceTests extends ESTestCase { latch.await(); } finally { Loggers.removeAppender(clusterLogger, mockAppender); + mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); } + public void 
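The new test that follows relies on the reworked NodeConnectionsService API, which now receives plain node lists instead of a ClusterChangedEvent. A minimal tracking stub under that assumption (the `connected` set name is illustrative, the hook signatures are taken from the hunks above):

    // Records which nodes the cluster service asked to connect to; no real
    // transport is involved, so the hooks simply mutate a synchronized set.
    Set<DiscoveryNode> connected = Collections.synchronizedSet(new HashSet<>());
    NodeConnectionsService tracker = new NodeConnectionsService(Settings.EMPTY, null, null) {
        @Override
        public void connectToNodes(List<DiscoveryNode> addedNodes) {
            connected.addAll(addedNodes);
        }

        @Override
        public void disconnectFromNodes(List<DiscoveryNode> removedNodes) {
            connected.removeAll(removedNodes);
        }
    };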
+ public void testDisconnectFromNewlyAddedNodesIfClusterStatePublishingFails() throws InterruptedException { + TimedClusterService timedClusterService = new TimedClusterService(Settings.builder().put("cluster.name", + "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool); + timedClusterService.setLocalNode(new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), + emptySet(), Version.CURRENT)); + Set<DiscoveryNode> currentNodes = Collections.synchronizedSet(new HashSet<>()); + currentNodes.add(timedClusterService.localNode()); + timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { + @Override + public void connectToNodes(List<DiscoveryNode> addedNodes) { + currentNodes.addAll(addedNodes); + } + + @Override + public void disconnectFromNodes(List<DiscoveryNode> removedNodes) { + currentNodes.removeAll(removedNodes); + } + }); + AtomicBoolean failToCommit = new AtomicBoolean(); + timedClusterService.setClusterStatePublisher((event, ackListener) -> { + if (failToCommit.get()) { + throw new Discovery.FailedToCommitClusterStateException("just to test this"); + } + }); + timedClusterService.start(); + ClusterState state = timedClusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes) + .masterNodeId(nodes.getLocalNodeId()); + state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) + .nodes(nodesBuilder).build(); + setState(timedClusterService, state); + + assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes()))); + + final CountDownLatch latch = new CountDownLatch(1); + + // try to add node when cluster state publishing fails + failToCommit.set(true); + timedClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + DiscoveryNode newNode = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), + emptySet(), Version.CURRENT); + return ClusterState.builder(currentState).nodes(DiscoveryNodes.builder(currentState.nodes()).add(newNode)).build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Exception e) { + latch.countDown(); + } + }); + + latch.await(); + assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes()))); + timedClusterService.close(); + } + private static class SimpleTask { private final int id;
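The publish failure in the new ClusterServiceTests case above is provoked through the setClusterStatePublisher hook; the same pattern can be reused wherever a commit failure needs to be simulated. A sketch reusing the names from that test:

    // A publisher that refuses to commit: the cluster service must then roll
    // back the connections it opened for the nodes in the rejected state.
    timedClusterService.setClusterStatePublisher((event, ackListener) -> {
        throw new Discovery.FailedToCommitClusterStateException("simulated publish failure");
    });

diff --git a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java index a88c24873f9..7c2c789c770 100644 --- a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java @@ -36,8 +36,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; -/** - */ @ClusterScope(scope= Scope.SUITE, numDataNodes = 2) public class ClusterSearchShardsIT extends ESIntegTestCase {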
diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index 0aad8669cb0..86fa25872e0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -385,7 +385,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { assertThat(shardIterators.iterator().next().shardId().id(), equalTo(1)); //check node preference, first without preference to see they switch - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0;"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0|"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); String firstRoundNodeId = shardIterators.iterator().next().nextOrNull().currentNodeId(); @@ -395,12 +395,12 @@ assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), not(equalTo(firstRoundNodeId))); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0;_prefer_nodes:node1"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0|_prefer_nodes:node1"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1")); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0;_prefer_nodes:node1,node2"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0|_prefer_nodes:node1,node2"); assertThat(shardIterators.size(), equalTo(1)); Iterator<ShardIterator> iterator = shardIterators.iterator(); final ShardIterator it = iterator.next();
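The RoutingIteratorTests hunks above track the change of the search-preference clause separator from ';' to '|'. A usage sketch with the names from the test (the GroupShardsIterator return type is an assumption based on OperationRouting.searchShards):

    // '_shards' and '_prefer_nodes' clauses are now joined with '|';
    // ';' is no longer recognized as the separator.
    GroupShardsIterator shardIterators = operationRouting.searchShards(
            clusterState, new String[]{"test"}, null, "_shards:0|_prefer_nodes:node1,node2");

diff --git a/core/src/test/java/org/elasticsearch/common/ReleasablesTests.java b/core/src/test/java/org/elasticsearch/common/ReleasablesTests.java new file mode 100644 index 00000000000..62686354913 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/ReleasablesTests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.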
+ */ +package org.elasticsearch.common; + +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicInteger; + +public class ReleasablesTests extends ESTestCase { + + public void testReleaseOnce() { + AtomicInteger count = new AtomicInteger(0); + Releasable releasable = Releasables.releaseOnce(count::incrementAndGet, count::incrementAndGet); + assertEquals(0, count.get()); + releasable.close(); + assertEquals(2, count.get()); + releasable.close(); + assertEquals(2, count.get()); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/TableTests.java b/core/src/test/java/org/elasticsearch/common/TableTests.java index 7e624cb749d..c5e3c34910b 100644 --- a/core/src/test/java/org/elasticsearch/common/TableTests.java +++ b/core/src/test/java/org/elasticsearch/common/TableTests.java @@ -28,71 +28,43 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class TableTests extends ESTestCase { + public void testFailOnStartRowWithoutHeader() { Table table = new Table(); - try { - table.startRow(); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no headers added...")); - } + Exception e = expectThrows(IllegalStateException.class, () -> table.startRow()); + assertThat(e.getMessage(), is("no headers added...")); } public void testFailOnEndHeadersWithoutStart() { Table table = new Table(); - try { - table.endHeaders(); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no headers added...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.endHeaders()); + assertThat(e.getMessage(), is("no headers added...")); } public void testFailOnAddCellWithoutHeader() { Table table = new Table(); - try { - table.addCell("error"); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no block started...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("error")); + assertThat(e.getMessage(), is("no block started...")); } public void testFailOnAddCellWithoutRow() { Table table = this.getTableWithHeaders(); - try { - table.addCell("error"); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no block started...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("error")); + assertThat(e.getMessage(), is("no block started...")); } public void testFailOnEndRowWithoutStart() { Table table = this.getTableWithHeaders(); - try { - table.endRow(); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("no row started...")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.endRow()); + assertThat(e.getMessage(), is("no row started...")); } public void testFailOnLessCellsThanDeclared() { Table table = this.getTableWithHeaders(); table.startRow(); table.addCell("foo"); - try { - table.endRow(true); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("mismatch on number of cells 1 in a row compared to header 2")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.endRow()); + assertThat(e.getMessage(), is("mismatch on number of cells 1 in 
a row compared to header 2")); } public void testOnLessCellsThanDeclaredUnchecked() { @@ -107,13 +79,8 @@ public class TableTests extends ESTestCase { table.startRow(); table.addCell("foo"); table.addCell("bar"); - try { - table.addCell("foobar"); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("can't add more cells to a row than the header")); - } - + Exception e = expectThrows(IllegalStateException.class, () -> table.addCell("foobar")); + assertThat(e.getMessage(), is("can't add more cells to a row than the header")); } public void testSimple() { @@ -200,6 +167,19 @@ } + public void testAliasMap() { + Table table = new Table(); + table.startHeaders(); + table.addCell("asdf", "alias:a"); + table.addCell("ghij", "alias:g,h"); + table.endHeaders(); + Map<String, String> aliasMap = table.getAliasMap(); + assertEquals(5, aliasMap.size()); + assertEquals("asdf", aliasMap.get("a")); + assertEquals("ghij", aliasMap.get("g")); + assertEquals("ghij", aliasMap.get("h")); + } + private Table getTableWithHeaders() { Table table = new Table(); table.startHeaders();
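The TableTests rewrites above are part of the patch-wide sweep that collapses try/fail/catch boilerplate into the test framework's expectThrows helper. The idiom, exactly as used in these tests:

    // expectThrows runs the lambda, fails the test if nothing is thrown or if
    // the type does not match, and returns the exception for further checks.
    IllegalStateException e = expectThrows(IllegalStateException.class, () -> table.startRow());
    assertThat(e.getMessage(), is("no headers added..."));

diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java new file mode 100644 index 00000000000..e0a5786e184 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheBuilderTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.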
+ */ + +package org.elasticsearch.common.cache; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class CacheBuilderTests extends ESTestCase { + + public void testSettingExpireAfterAccess() { + IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.MINUS_ONE)); + assertThat(iae.getMessage(), containsString("expireAfterAccess <=")); + iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterAccess(TimeValue.ZERO)); + assertThat(iae.getMessage(), containsString("expireAfterAccess <=")); + final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), ""); + Cache<Object, Object> cache = CacheBuilder.<Object, Object>builder().setExpireAfterAccess(timeValue).build(); + assertEquals(timeValue.getNanos(), cache.getExpireAfterAccessNanos()); + } + + public void testSettingExpireAfterWrite() { + IllegalArgumentException iae = + expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.MINUS_ONE)); + assertThat(iae.getMessage(), containsString("expireAfterWrite <=")); + iae = expectThrows(IllegalArgumentException.class, () -> CacheBuilder.builder().setExpireAfterWrite(TimeValue.ZERO)); + assertThat(iae.getMessage(), containsString("expireAfterWrite <=")); + final TimeValue timeValue = TimeValue.parseTimeValue(randomPositiveTimeValue(), ""); + Cache<Object, Object> cache = CacheBuilder.<Object, Object>builder().setExpireAfterWrite(timeValue).build(); + assertEquals(timeValue.getNanos(), cache.getExpireAfterWriteNanos()); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 3b88a3bdcfe..d8dbaa673a0 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -228,7 +228,7 @@ public class CacheTests extends ESTestCase { return now.get(); } }; - cache.setExpireAfterAccess(1); + cache.setExpireAfterAccessNanos(1); List<Integer> evictedKeys = new ArrayList<>(); cache.setRemovalListener(notification -> { assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); @@ -265,7 +265,7 @@ return now.get(); } }; - cache.setExpireAfterWrite(1); + cache.setExpireAfterWriteNanos(1); List<Integer> evictedKeys = new ArrayList<>(); cache.setRemovalListener(notification -> { assertEquals(RemovalNotification.RemovalReason.EVICTED, notification.getRemovalReason()); @@ -307,7 +307,7 @@ return now.get(); } }; - cache.setExpireAfterAccess(1); + cache.setExpireAfterAccessNanos(1); now.set(0); for (int i = 0; i < numberOfEntries; i++) { cache.put(i, Integer.toString(i));
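The cache hunks above rename the nanosecond-granularity setters (setExpireAfterAccessNanos/setExpireAfterWriteNanos) while the public builder keeps accepting a TimeValue and rejects zero or negative expirations. A usage sketch; the explicit type witness is an assumption based on the builder's generic signature:

    // Entries expire five minutes after the last access; TimeValue.ZERO or
    // TimeValue.MINUS_ONE would trip the IllegalArgumentException tested above.
    Cache<String, String> cache = CacheBuilder.<String, String>builder()
            .setExpireAfterAccess(TimeValue.timeValueMinutes(5))
            .build();

diff --git a/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java b/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java index 0ce95077965..f2c1f853e2a 100644 --- a/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java +++ b/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java @@ -32,9 +32,6 @@ import java.util.Random; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -/** - * - */ public class DeflateCompressedXContentTests extends ESTestCase { private final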
Compressor compressor = new DeflateCompressor(); diff --git a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index abbd6ce40aa..dc01a39cb81 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,12 +19,6 @@ package org.elasticsearch.common.geo; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Circle; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.impl.PointImpl; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.LineString; import com.vividsolutions.jts.geom.Polygon; @@ -35,6 +29,12 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilders; import org.elasticsearch.test.ESTestCase; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Circle; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.impl.PointImpl; import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiLineString; import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertMultiPolygon; @@ -183,17 +183,13 @@ public class ShapeBuilderTests extends ESTestCase { } public void testPolygonSelfIntersection() { - try { - ShapeBuilders.newPolygon(new CoordinatesBuilder() + PolygonBuilder newPolygon = ShapeBuilders.newPolygon(new CoordinatesBuilder() .coordinate(-40.0, 50.0) .coordinate(40.0, 50.0) .coordinate(-40.0, -50.0) - .coordinate(40.0, -50.0).close()) - .build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("Self-intersection at or near point (0.0")); - } + .coordinate(40.0, -50.0).close()); + Exception e = expectThrows(InvalidShapeException.class, () -> newPolygon.build()); + assertThat(e.getMessage(), containsString("Self-intersection at or near point (0.0")); } public void testGeoCircle() { @@ -550,12 +546,8 @@ public class ShapeBuilderTests extends ESTestCase { .coordinate(179, -10) .coordinate(164, 0) )); - try { - builder.close().build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - } + Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); + assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } public void testBoundaryShapeWithTangentialHole() { @@ -602,12 +594,8 @@ public class ShapeBuilderTests extends ESTestCase { .coordinate(176, -10) .coordinate(-177, 10) )); - try { - builder.close().build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - } + Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); + assertThat(e.getMessage(), 
containsString("interior cannot share more than one point with the exterior")); } /** @@ -659,11 +647,7 @@ public class ShapeBuilderTests extends ESTestCase { .coordinate(-176, 4) .coordinate(180, 0) ); - try { - builder.close().build(); - fail("Expected InvalidShapeException"); - } catch (InvalidShapeException e) { - assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); - } + Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); + assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index 00acef88b89..d71ca8761dc 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -19,10 +19,9 @@ package org.elasticsearch.common.geo.builders; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -35,11 +34,9 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public abstract class AbstractShapeBuilderTestCase extends ESTestCase { @@ -97,11 +94,10 @@ public abstract class AbstractShapeBuilderTestCase exte /** * Test serialization and deserialization of the test shape. 
*/ - @SuppressWarnings("unchecked") public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB testShape = createTestShapeBuilder(); - SB deserializedShape = (SB) copyShape(testShape); + SB deserializedShape = copyShape(testShape); assertEquals(testShape, deserializedShape); assertEquals(testShape.hashCode(), deserializedShape.hashCode()); assertNotSame(testShape, deserializedShape); @@ -111,40 +107,15 @@ public abstract class AbstractShapeBuilderTestCase exte /** * Test equality and hashCode properties */ - @SuppressWarnings("unchecked") public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { - SB firstShape = createTestShapeBuilder(); - assertFalse("shape is equal to null", firstShape.equals(null)); - assertFalse("shape is equal to incompatible type", firstShape.equals("")); - assertTrue("shape is not equal to self", firstShape.equals(firstShape)); - assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(), - equalTo(firstShape.hashCode())); - assertThat("different shapes should not be equal", createMutation(firstShape), not(equalTo(firstShape))); - - SB secondShape = (SB) copyShape(firstShape); - assertTrue("shape is not equal to self", secondShape.equals(secondShape)); - assertTrue("shape is not equal to its copy", firstShape.equals(secondShape)); - assertTrue("equals is not symmetric", secondShape.equals(firstShape)); - assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(firstShape.hashCode())); - - SB thirdShape = (SB) copyShape(secondShape); - assertTrue("shape is not equal to self", thirdShape.equals(thirdShape)); - assertTrue("shape is not equal to its copy", secondShape.equals(thirdShape)); - assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(thirdShape.hashCode())); - assertTrue("equals is not transitive", firstShape.equals(thirdShape)); - assertThat("shape copy's hashcode is different from original hashcode", firstShape.hashCode(), equalTo(thirdShape.hashCode())); - assertTrue("equals is not symmetric", thirdShape.equals(secondShape)); - assertTrue("equals is not symmetric", thirdShape.equals(firstShape)); + checkEqualsAndHashCode(createTestShapeBuilder(), AbstractShapeBuilderTestCase::copyShape, this::createMutation); } } - static ShapeBuilder copyShape(ShapeBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return namedWriteableRegistry.getReader(ShapeBuilder.class, original.getWriteableName()).read(in); - } - } + protected static T copyShape(T original) throws IOException { + @SuppressWarnings("unchecked") + Reader reader = (Reader) namedWriteableRegistry.getReader(ShapeBuilder.class, original.getWriteableName()); + return ESTestCase.copyWriteable(original, namedWriteableRegistry, reader); } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 3c9ca34c6ea..348ac049f28 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -20,6 +20,7 @@ package 
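The hand-rolled reflexivity, symmetry, and transitivity assertions removed above collapse into the shared EqualsHashCodeTestUtils helper. The call shape, as used in the new testEqualsAndHashcode:

    // checkEqualsAndHashCode takes the original object, a copy function that
    // must produce an equal instance, and a mutation that must not be equal.
    checkEqualsAndHashCode(createTestShapeBuilder(),
            AbstractShapeBuilderTestCase::copyShape,
            this::createMutation);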
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 3c9ca34c6ea..348ac049f28 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.geo.builders; import com.vividsolutions.jts.geom.Coordinate; + import org.elasticsearch.common.unit.DistanceUnit; import java.io.IOException; @@ -37,7 +38,7 @@ public class CircleBuilderTests extends AbstractShapeBuilderTestCase<CircleBuilder> [...] - fail("expected IllegalArgumentException: length > (size-offset)"); - } - catch (IllegalArgumentException iax1) { - // expected - } - + expectThrows(IllegalArgumentException.class, () -> out.writeBytes(new byte[]{}, 0, 1)); out.close(); } @@ -245,6 +238,9 @@ assertEquals(position, out.position()); assertEquals(position, BytesReference.toBytes(out.bytes()).length); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> out.seek(Integer.MAX_VALUE + 1L)); + assertEquals("BytesStreamOutput cannot hold more than 2GB of data", iae.getMessage()); + out.close(); } @@ -258,6 +254,9 @@ out.skip(forward); assertEquals(position + forward, out.position()); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> out.skip(Integer.MAX_VALUE - 50)); + assertEquals("BytesStreamOutput cannot hold more than 2GB of data", iae.getMessage()); + out.close(); } @@ -333,18 +332,21 @@ } public void testNamedWriteable() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new) - )); - TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); - out.writeNamedWriteable(namedWriteableIn); - byte[] bytes = BytesReference.toBytes(out.bytes()); - StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry); - assertEquals(in.available(), bytes.length); - BaseNamedWriteable namedWriteableOut = in.readNamedWriteable(BaseNamedWriteable.class); - assertEquals(namedWriteableIn, namedWriteableOut); - assertEquals(0, in.available()); + try (BytesStreamOutput out = new BytesStreamOutput()) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( + new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new))); + TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + out.writeNamedWriteable(namedWriteableIn); + byte[] bytes = BytesReference.toBytes(out.bytes()); + + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { + assertEquals(in.available(), bytes.length); + BaseNamedWriteable namedWriteableOut = in.readNamedWriteable(BaseNamedWriteable.class); + assertEquals(namedWriteableIn, namedWriteableOut); + assertEquals(0, in.available()); + } + } } public void testNamedWriteableList() throws IOException { @@ -367,59 +369,61 @@ } public void testNamedWriteableNotSupportedWithoutWrapping() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2"); - out.writeNamedWriteable(testNamedWriteable); - StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - try { -
in.readNamedWriteable(BaseNamedWriteable.class); - fail("Expected UnsupportedOperationException"); - } catch (UnsupportedOperationException e) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + TestNamedWriteable testNamedWriteable = new TestNamedWriteable("test1", "test2"); + out.writeNamedWriteable(testNamedWriteable); + StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); + Exception e = expectThrows(UnsupportedOperationException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); assertThat(e.getMessage(), is("can't read named writeable from StreamInput")); } } public void testNamedWriteableReaderReturnsNull() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null) - )); - TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); - out.writeNamedWriteable(namedWriteableIn); - byte[] bytes = BytesReference.toBytes(out.bytes()); - StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry); - assertEquals(in.available(), bytes.length); - IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); - assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + try (BytesStreamOutput out = new BytesStreamOutput()) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( + new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null))); + TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + out.writeNamedWriteable(namedWriteableIn); + byte[] bytes = BytesReference.toBytes(out.bytes()); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { + assertEquals(in.available(), bytes.length); + IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); + assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + } + } } public void testOptionalWriteableReaderReturnsNull() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10))); - StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null)); - assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10))); + StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); + IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null)); + assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream.")); + } } public 
void testWriteableReaderReturnsWrongName() throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> - new TestNamedWriteable(in) { - @Override - public String getWriteableName() { - return "intentionally-broken"; - } - }) - )); - TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); - out.writeNamedWriteable(namedWriteableIn); - byte[] bytes = BytesReference.toBytes(out.bytes()); - StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry); - assertEquals(in.available(), bytes.length); - AssertionError e = expectThrows(AssertionError.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); - assertThat(e.getMessage(), - endsWith(" claims to have a different name [intentionally-broken] than it was read from [test-named-writeable].")); + try (BytesStreamOutput out = new BytesStreamOutput()) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( + Collections.singletonList(new NamedWriteableRegistry.Entry(BaseNamedWriteable.class, TestNamedWriteable.NAME, + (StreamInput in) -> new TestNamedWriteable(in) { + @Override + public String getWriteableName() { + return "intentionally-broken"; + } + }))); + TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), + randomAsciiOfLengthBetween(1, 10)); + out.writeNamedWriteable(namedWriteableIn); + byte[] bytes = BytesReference.toBytes(out.bytes()); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { + assertEquals(in.available(), bytes.length); + AssertionError e = expectThrows(AssertionError.class, () -> in.readNamedWriteable(BaseNamedWriteable.class)); + assertThat(e.getMessage(), + endsWith(" claims to have a different name [intentionally-broken] than it was read from [test-named-writeable].")); + } + } } public void testWriteStreamableList() throws IOException { @@ -551,32 +555,13 @@ assertEquals(-1, out.position()); // writing a single byte must fail - try { - out.writeByte((byte)0); - fail("expected IllegalStateException: stream closed"); - } - catch (IllegalStateException iex1) { - // expected - } + expectThrows(IllegalArgumentException.class, () -> out.writeByte((byte)0)); // writing in bulk must fail - try { - out.writeBytes(new byte[0], 0, 0); - fail("expected IllegalStateException: stream closed"); - } - catch (IllegalStateException iex1) { - // expected - } + expectThrows(IllegalArgumentException.class, () -> out.writeBytes(new byte[0], 0, 0)); // toByteArray() must fail - try { - BytesReference.toBytes(out.bytes()); - fail("expected IllegalStateException: stream closed"); - } - catch (IllegalStateException iex1) { - // expected - } - + expectThrows(IllegalArgumentException.class, () -> BytesReference.toBytes(out.bytes())); } // create & fill byte[] with randomized data @@ -587,16 +572,15 @@ } public void testReadWriteGeoPoint() throws IOException { - { - BytesStreamOutput out = new BytesStreamOutput(); + try (BytesStreamOutput out = new BytesStreamOutput()) { GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); out.writeGenericValue(geoPoint); StreamInput
wrap = out.bytes().streamInput(); GeoPoint point = (GeoPoint) wrap.readGenericValue(); assertEquals(point, geoPoint); } - { - BytesStreamOutput out = new BytesStreamOutput(); + + try (BytesStreamOutput out = new BytesStreamOutput()) { GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); out.writeGeoPoint(geoPoint); StreamInput wrap = out.bytes().streamInput(); @@ -640,12 +624,12 @@ assertNotEquals(mapKeys, reverseMapKeys); - BytesStreamOutput output = new BytesStreamOutput(); - BytesStreamOutput reverseMapOutput = new BytesStreamOutput(); - output.writeMapWithConsistentOrder(map); - reverseMapOutput.writeMapWithConsistentOrder(reverseMap); + try (BytesStreamOutput output = new BytesStreamOutput(); BytesStreamOutput reverseMapOutput = new BytesStreamOutput()) { + output.writeMapWithConsistentOrder(map); + reverseMapOutput.writeMapWithConsistentOrder(reverseMap); - assertEquals(output.bytes(), reverseMapOutput.bytes()); + assertEquals(output.bytes(), reverseMapOutput.bytes()); + } } public void testReadMapByUsingWriteMapWithConsistentOrder() throws IOException { @@ -653,18 +637,20 @@ randomMap(new HashMap<>(), randomIntBetween(2, 20), () -> randomAsciiOfLength(5), () -> randomAsciiOfLength(5)); - BytesStreamOutput streamOut = new BytesStreamOutput(); - streamOut.writeMapWithConsistentOrder(streamOutMap); - StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes())); - Map<String, Object> streamInMap = in.readMap(); - assertEquals(streamOutMap, streamInMap); + try (BytesStreamOutput streamOut = new BytesStreamOutput()) { + streamOut.writeMapWithConsistentOrder(streamOutMap); + StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes())); + Map<String, Object> streamInMap = in.readMap(); + assertEquals(streamOutMap, streamInMap); + } } public void testWriteMapWithConsistentOrderWithLinkedHashMapShouldThrowAssertError() throws IOException { - BytesStreamOutput output = new BytesStreamOutput(); - Map<String, Object> map = new LinkedHashMap<>(); - Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map)); - assertEquals(AssertionError.class, e.getClass()); + try (BytesStreamOutput output = new BytesStreamOutput()) { + Map<String, Object> map = new LinkedHashMap<>(); + Throwable e = expectThrows(AssertionError.class, () -> output.writeMapWithConsistentOrder(map)); + assertEquals(AssertionError.class, e.getClass()); + } } private static <K, V> Map<K, V> randomMap(Map<K, V> map, int size, Supplier<K> keyGenerator, Supplier<V> valueGenerator) { diff --git a/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java b/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java index b1d0abc96cf..ca9a6b3a1ab 100644 --- a/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java @@ -25,8 +25,8 @@ import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; import java.util.TimeZone; -import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.LongSupplier; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo;
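The DateMathParserTests hunks that follow replace the Callable<Long> "now" provider with a LongSupplier. A minimal sketch of the new signature (the formatter pattern is illustrative):

    // "now" is supplied lazily: the supplier is only invoked when the
    // expression actually refers to now, as testOnlyCallsNowIfNecessary shows.
    DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"));
    long millis = parser.parse("now/d", System::currentTimeMillis, false, null);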
@@ -36,26 +36,17 @@ public class DateMathParserTests extends ESTestCase { FormatDateTimeFormatter formatter = Joda.forPattern("dateOptionalTime||epoch_millis"); DateMathParser parser = new DateMathParser(formatter); - private static Callable<Long> callable(final long value) { - return new Callable<Long>() { - @Override - public Long call() throws Exception { - return value; - } - }; - } - void assertDateMathEquals(String toTest, String expected) { assertDateMathEquals(toTest, expected, 0, false, null); } void assertDateMathEquals(String toTest, String expected, final long now, boolean roundUp, DateTimeZone timeZone) { - long gotMillis = parser.parse(toTest, callable(now), roundUp, timeZone); + long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone); assertDateEquals(gotMillis, toTest, expected); } void assertDateEquals(long gotMillis, String original, String expected) { - long expectedMillis = parser.parse(expected, callable(0)); + long expectedMillis = parser.parse(expected, () -> 0); if (gotMillis != expectedMillis) { fail("Date math not equal\n" + "Original : " + original + "\n" + @@ -77,16 +68,38 @@ } public void testRoundingDoesNotAffectExactDate() { - assertDateMathEquals("2014-11-12T22:55:00Z", "2014-11-12T22:55:00Z", 0, true, null); - assertDateMathEquals("2014-11-12T22:55:00Z", "2014-11-12T22:55:00Z", 0, false, null); + assertDateMathEquals("2014-11-12T22:55:00.000Z", "2014-11-12T22:55:00.000Z", 0, true, null); + assertDateMathEquals("2014-11-12T22:55:00.000Z", "2014-11-12T22:55:00.000Z", 0, false, null); + + assertDateMathEquals("2014-11-12T22:55:00.000", "2014-11-12T21:55:00.000Z", 0, true, DateTimeZone.forID("+01:00")); + assertDateMathEquals("2014-11-12T22:55:00.000", "2014-11-12T21:55:00.000Z", 0, false, DateTimeZone.forID("+01:00")); + + assertDateMathEquals("2014-11-12T22:55:00.000+01:00", "2014-11-12T21:55:00.000Z", 0, true, null); + assertDateMathEquals("2014-11-12T22:55:00.000+01:00", "2014-11-12T21:55:00.000Z", 0, false, null); } public void testTimezone() { // timezone works within date format assertDateMathEquals("2014-05-30T20:21+02:00", "2014-05-30T18:21:00.000"); + // test alternative ways of writing zero offsets, according to ISO 8601 +00:00, +00, +0000 should work.
+ // joda also seems to allow for -00:00, -00, -0000 + assertDateMathEquals("2014-05-30T18:21+00:00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21+00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21+0000", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21-00:00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21-00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21-0000", "2014-05-30T18:21:00.000"); + // but also externally assertDateMathEquals("2014-05-30T20:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+02:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+00:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+00:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+0000")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("-00:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("-00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("-0000")); // and timezone in the date has priority assertDateMathEquals("2014-05-30T20:21+03:00", "2014-05-30T17:21:00.000", 0, false, DateTimeZone.forID("-08:00")); @@ -132,7 +145,7 @@ public class DateMathParserTests extends ESTestCase { public void testNow() { - final long now = parser.parse("2014-11-18T14:27:32", callable(0), false, null); + final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, null); assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); @@ -143,7 +156,43 @@ public class DateMathParserTests extends ESTestCase { assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, DateTimeZone.forID("+02:00")); } - public void testRounding() { + public void testRoundingPreservesEpochAsBaseDate() { + // If a user only specifies times, then the date needs to always be 1970-01-01 regardless of rounding + FormatDateTimeFormatter formatter = Joda.forPattern("HH:mm:ss"); + DateMathParser parser = new DateMathParser(formatter); + assertEquals( + this.formatter.parser().parseMillis("1970-01-01T04:52:20.000Z"), + parser.parse("04:52:20", () -> 0, false, null)); + assertEquals( + this.formatter.parser().parseMillis("1970-01-01T04:52:20.999Z"), + parser.parse("04:52:20", () -> 0, true, null)); + } + + // Implicit rounding happening when parts of the date are not specified + public void testImplicitRounding() { + assertDateMathEquals("2014-11-18", "2014-11-18", 0, false, null); + assertDateMathEquals("2014-11-18", "2014-11-18T23:59:59.999Z", 0, true, null); + + assertDateMathEquals("2014-11-18T09:20", "2014-11-18T09:20", 0, false, null); + assertDateMathEquals("2014-11-18T09:20", "2014-11-18T09:20:59.999Z", 0, true, null); + + assertDateMathEquals("2014-11-18", "2014-11-17T23:00:00.000Z", 0, false, DateTimeZone.forID("CET")); + assertDateMathEquals("2014-11-18", "2014-11-18T22:59:59.999Z", 0, true, DateTimeZone.forID("CET")); + + assertDateMathEquals("2014-11-18T09:20", "2014-11-18T08:20:00.000Z", 0, false, DateTimeZone.forID("CET")); + assertDateMathEquals("2014-11-18T09:20", "2014-11-18T08:20:59.999Z", 0, true, 
DateTimeZone.forID("CET")); + + // implicit rounding with explicit timezone in the date format + FormatDateTimeFormatter formatter = Joda.forPattern("YYYY-MM-ddZ"); + DateMathParser parser = new DateMathParser(formatter); + long time = parser.parse("2011-10-09+01:00", () -> 0, false, null); + assertEquals(this.parser.parse("2011-10-09T00:00:00.000+01:00", () -> 0), time); + time = parser.parse("2011-10-09+01:00", () -> 0, true, null); + assertEquals(this.parser.parse("2011-10-09T23:59:59.999+01:00", () -> 0), time); + } + + // Explicit rounding using the || separator + public void testExplicitRounding() { assertDateMathEquals("2014-11-18||/y", "2014-01-01", 0, false, null); assertDateMathEquals("2014-11-18||/y", "2014-12-31T23:59:59.999", 0, true, null); assertDateMathEquals("2014||/y", "2014-01-01", 0, false, null); @@ -204,7 +253,7 @@ public class DateMathParserTests extends ESTestCase { // also check other time units DateMathParser parser = new DateMathParser(Joda.forPattern("epoch_second||dateOptionalTime")); - long datetime = parser.parse("1418248078", callable(0)); + long datetime = parser.parse("1418248078", () -> 0); assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000"); // a timestamp before 10000 is a year @@ -217,7 +266,7 @@ public class DateMathParserTests extends ESTestCase { void assertParseException(String msg, String date, String exc) { try { - parser.parse(date, callable(0)); + parser.parse(date, () -> 0); fail("Date: " + date + "\n" + msg); } catch (ElasticsearchParseException e) { assertThat(ExceptionsHelper.detailedMessage(e).contains(exc), equalTo(true)); @@ -239,12 +288,9 @@ public class DateMathParserTests extends ESTestCase { public void testOnlyCallsNowIfNecessary() { final AtomicBoolean called = new AtomicBoolean(); - final Callable now = new Callable() { - @Override - public Long call() throws Exception { - called.set(true); - return 42L; - } + final LongSupplier now = () -> { + called.set(true); + return 42L; }; parser.parse("2014-11-18T14:27:32", now, false, null); assertFalse(called.get()); @@ -255,7 +301,7 @@ public class DateMathParserTests extends ESTestCase { public void testThatUnixTimestampMayNotHaveTimeZone() { DateMathParser parser = new DateMathParser(Joda.forPattern("epoch_millis")); try { - parser.parse("1234567890123", callable(42), false, DateTimeZone.forTimeZone(TimeZone.getTimeZone("CET"))); + parser.parse("1234567890123", () -> 42, false, DateTimeZone.forTimeZone(TimeZone.getTimeZone("CET"))); fail("Expected ElasticsearchParseException"); } catch(ElasticsearchParseException e) { assertThat(e.getMessage(), containsString("failed to parse date field")); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java index 7c1aaf3a3c7..ff8b25c796d 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java @@ -44,9 +44,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class SimpleAllTests extends ESTestCase { private FieldType getAllFieldType() { FieldType ft = new FieldType(); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 3d1b0fdb842..546d62a0e1f 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -54,8 +54,6 @@ import java.util.Set; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -/** - */ public class FreqTermsEnumTests extends ESTestCase { private String[] terms; diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/MatchNoDocsQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/MatchNoDocsQueryTests.java deleted file mode 100644 index b328fd2ee9b..00000000000 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/MatchNoDocsQueryTests.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.TextField; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.Matchers.equalTo; - -public class MatchNoDocsQueryTests extends ESTestCase { - public void testSimple() throws Exception { - MatchNoDocsQuery query = new MatchNoDocsQuery("field 'title' not found"); - assertThat(query.toString(), equalTo("MatchNoDocsQuery[\"field 'title' not found\"]")); - Query rewrite = query.rewrite(null); - assertTrue(rewrite instanceof MatchNoDocsQuery); - assertThat(rewrite.toString(), equalTo("MatchNoDocsQuery[\"field 'title' not found\"]")); - } - - public void testSearch() throws Exception { - IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - Document doc = new Document(); - doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED)); - writer.addDocument(doc); - IndexReader reader = DirectoryReader.open(writer); - IndexSearcher searcher = new IndexSearcher(reader); - - Query query = new MatchNoDocsQuery("field not found"); - assertThat(searcher.count(query), equalTo(0)); - - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(new BooleanClause(new TermQuery(new Term("field", "aaa")), BooleanClause.Occur.SHOULD)); - bq.add(new BooleanClause(new MatchNoDocsQuery("field not found"), BooleanClause.Occur.MUST)); - query = bq.build(); - 
assertThat(searcher.count(query), equalTo(0)); - assertThat(query.toString(), equalTo("field:aaa +MatchNoDocsQuery[\"field not found\"]")); - - - bq = new BooleanQuery.Builder(); - bq.add(new BooleanClause(new TermQuery(new Term("field", "aaa")), BooleanClause.Occur.SHOULD)); - bq.add(new BooleanClause(new MatchNoDocsQuery("field not found"), BooleanClause.Occur.SHOULD)); - query = bq.build(); - assertThat(query.toString(), equalTo("field:aaa MatchNoDocsQuery[\"field not found\"]")); - assertThat(searcher.count(query), equalTo(1)); - Query rewrite = query.rewrite(reader); - assertThat(rewrite.toString(), equalTo("field:aaa MatchNoDocsQuery[\"field not found\"]")); - } -} diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java index 0dcce74c1d2..fb09ceb839c 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java @@ -35,9 +35,6 @@ import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class MoreLikeThisQueryTests extends ESTestCase { public void testSimple() throws Exception { Directory dir = new RAMDirectory(); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java b/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java index e8d8b914a4d..74de6b77f77 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java @@ -30,9 +30,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; -/** - * - */ public class InputStreamIndexInputTests extends ESTestCase { public void testSingleReadSingleByteLimit() throws IOException { RAMDirectory dir = new RAMDirectory(); diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 2daea859671..3e29068e672 100644 --- a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -34,14 +34,13 @@ import org.elasticsearch.http.HttpStats; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.cat.AbstractCatAction; -import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -51,12 +50,6 @@ import java.util.function.Supplier; public class NetworkModuleTests extends ModuleTestCase { - static class FakeTransport extends AssertingLocalTransport { - public FakeTransport() { - super(null, null, null, null); - } - } - static class FakeHttpTransport extends AbstractLifecycleComponent implements HttpServerTransport { public 
FakeHttpTransport() { super(null); @@ -89,7 +82,7 @@ public class NetworkModuleTests extends ModuleTestCase { super(null); } @Override - public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {} + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { return channel -> {}; } } static class FakeCatRestHandler extends AbstractCatAction { @@ -97,7 +90,7 @@ public class NetworkModuleTests extends ModuleTestCase { super(null); } @Override - protected void doRequest(RestRequest request, RestChannel channel, NodeClient client) {} + protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { return channel -> {}; } @Override protected void documentation(StringBuilder sb) {} @Override @@ -110,7 +103,7 @@ public class NetworkModuleTests extends ModuleTestCase { Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom") .put(NetworkModule.HTTP_ENABLED.getKey(), false) .build(); - Supplier custom = FakeTransport::new; + Supplier custom = () -> null; // content doesn't matter we check reference equality NetworkPlugin plugin = new NetworkPlugin() { @Override public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, @@ -166,7 +159,7 @@ public class NetworkModuleTests extends ModuleTestCase { .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), "default_custom") .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), "local") .put(NetworkModule.TRANSPORT_TYPE_KEY, "default_custom").build(); - Supplier customTransport = FakeTransport::new; + Supplier customTransport = () -> null; // content doesn't matter we check reference equality Supplier custom = FakeHttpTransport::new; Supplier def = FakeHttpTransport::new; NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() { @@ -200,7 +193,7 @@ public class NetworkModuleTests extends ModuleTestCase { .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), "default_custom").build(); Supplier custom = FakeHttpTransport::new; Supplier def = FakeHttpTransport::new; - Supplier customTransport = FakeTransport::new; + Supplier customTransport = () -> null; NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() { @Override public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, @@ -236,7 +229,7 @@ public class NetworkModuleTests extends ModuleTestCase { }; NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() { @Override - public List getTransportInterceptors() { + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry) { return Collections.singletonList(interceptor); } }); @@ -249,7 +242,7 @@ public class NetworkModuleTests extends ModuleTestCase { NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> { newNetworkModule(settings, false, new NetworkPlugin() { @Override - public List getTransportInterceptors() { + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry) { return Collections.singletonList(null); } }); diff --git a/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java b/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java index 0121b855068..f20935406a0 100644 --- a/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java +++ b/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java @@ -28,9 +28,6 @@ import java.util.Map; import 
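The NetworkModuleTests fakes above track a handler API change: handleRequest(request, channel, client) becomes prepareRequest(request, client) returning a RestChannelConsumer, which separates eager request parsing from deferred response writing. A sketch of that two-phase shape with hypothetical names (Channel and ChannelConsumer here are stand-ins, not the ES types):

final class TwoPhaseHandlerSketch {
    interface Channel { void send(String response); }
    interface ChannelConsumer { void accept(Channel channel); }

    // Phase 1 runs eagerly and may validate or parse the request, failing
    // fast; phase 2 runs later, once a channel exists to write the response.
    static ChannelConsumer prepareRequest(String request) {
        final String parsed = request.trim();                    // eager work
        return channel -> channel.send("handled: " + parsed);    // deferred work
    }

    public static void main(String[] args) {
        ChannelConsumer consumer = prepareRequest("  GET /  ");
        consumer.accept(response -> { assert response.equals("handled: GET /"); });
    }
}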
static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class PathTrieTests extends ESTestCase { public static final PathTrie.Decoder NO_DECODER = new PathTrie.Decoder() { diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index ff83ddfa57d..159b8693b84 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -30,6 +30,8 @@ import org.hamcrest.TypeSafeMatcher; import org.joda.time.DateTime; import org.joda.time.DateTimeConstants; import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISODateTimeFormat; import java.util.ArrayList; @@ -41,9 +43,8 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.startsWith; -/** - */ public class TimeZoneRoundingTests extends ESTestCase { public void testUTCTimeUnitRounding() { @@ -513,6 +514,25 @@ public class TimeZoneRoundingTests extends ESTestCase { } } + /** + * Test that time zones are correctly parsed. There is a bug with + * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373) + */ + public void testTimeZoneParsing() { + final DateTime expected = new DateTime(2016, 11, 10, 5, 37, 59, randomDateTimeZone()); + + // Formatter used to print and parse the sample date. + // Printing the date works but parsing it back fails + // with Joda 2.9.4 + DateTimeFormatter formatter = DateTimeFormat.forPattern("YYYY-MM-dd'T'HH:mm:ss " + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'")); + + String dateTimeAsString = formatter.print(expected); + assertThat(dateTimeAsString, startsWith("2016-11-10T05:37:59 ")); + + DateTime parsedDateTime = formatter.parseDateTime(dateTimeAsString); + assertThat(parsedDateTime.getZone(), equalTo(expected.getZone())); + } + private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, DateTimeZone tz) { assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); diff --git a/core/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java b/core/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java index 1a3fa4db137..a3a1178473d 100644 --- a/core/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java +++ b/core/src/test/java/org/elasticsearch/common/transport/BoundTransportAddressTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.transport; -import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; @@ -40,12 +39,12 @@ public class BoundTransportAddressTests extends ESTestCase { public void testSerialization() throws Exception { InetAddress[] inetAddresses = InetAddress.getAllByName("0.0.0.0"); - List transportAddressList = new ArrayList<>(); + List transportAddressList = new ArrayList<>(); for (InetAddress address : inetAddresses) { - transportAddressList.add(new InetSocketTransportAddress(address, 
randomIntBetween(9200, 9299))); + transportAddressList.add(new TransportAddress(address, randomIntBetween(9200, 9299))); } final BoundTransportAddress transportAddress = - new BoundTransportAddress(transportAddressList.toArray(new InetSocketTransportAddress[0]), transportAddressList.get(0)); + new BoundTransportAddress(transportAddressList.toArray(new TransportAddress[0]), transportAddressList.get(0)); assertThat(transportAddress.boundAddresses().length, equalTo(transportAddressList.size())); // serialize @@ -75,7 +74,7 @@ public class BoundTransportAddressTests extends ESTestCase { public void testBadBoundAddressArray() { try { TransportAddress[] badArray = randomBoolean() ? null : new TransportAddress[0]; - new BoundTransportAddress(badArray, new InetSocketTransportAddress(InetAddress.getLoopbackAddress(), 80)); + new BoundTransportAddress(badArray, new TransportAddress(InetAddress.getLoopbackAddress(), 80)); fail("expected an exception to be thrown due to no bound address"); } catch (IllegalArgumentException e) { //expected diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index 5296e226faa..6ad5a1ce32a 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -31,9 +31,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -/** - * - */ public class ByteSizeValueTests extends ESTestCase { public void testActualPeta() { MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).getBytes(), equalTo(4503599627370496L)); @@ -126,48 +123,69 @@ public class ByteSizeValueTests extends ESTestCase { } public void testFailOnMissingUnits() { - try { - ByteSizeValue.parseBytesSizeValue("23", "test"); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [test]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("23", "test")); + assertThat(e.getMessage(), containsString("failed to parse setting [test]")); } public void testFailOnUnknownUnits() { - try { - ByteSizeValue.parseBytesSizeValue("23jw", "test"); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [test]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("23jw", "test")); + assertThat(e.getMessage(), containsString("failed to parse setting [test]")); } public void testFailOnEmptyParsing() { - try { - assertThat(ByteSizeValue.parseBytesSizeValue("", "emptyParsing").toString(), is("23kb")); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [emptyParsing]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> assertThat(ByteSizeValue.parseBytesSizeValue("", "emptyParsing").toString(), is("23kb"))); + assertThat(e.getMessage(), containsString("failed to parse setting [emptyParsing]")); } public void testFailOnEmptyNumberParsing() { - try { - assertThat(ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing").toString(), is("23b")); - fail("Expected 
ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse [g]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, + () -> assertThat(ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing").toString(), is("23b"))); + assertThat(e.getMessage(), containsString("failed to parse [g]")); } public void testNoDotsAllowed() { - try { - ByteSizeValue.parseBytesSizeValue("42b.", null, "test"); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("failed to parse setting [test]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> ByteSizeValue.parseBytesSizeValue("42b.", null, "test")); + assertThat(e.getMessage(), containsString("failed to parse setting [test]")); + } + + public void testCompareEquality() { + long firstRandom = randomPositiveLong(); + ByteSizeUnit randomUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue firstByteValue = new ByteSizeValue(firstRandom, randomUnit); + ByteSizeValue secondByteValue = new ByteSizeValue(firstRandom, randomUnit); + assertEquals(0, firstByteValue.compareTo(secondByteValue)); + } + + public void testCompareValue() { + long firstRandom = randomPositiveLong(); + long secondRandom = randomValueOtherThan(firstRandom, ESTestCase::randomPositiveLong); + ByteSizeUnit unit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue firstByteValue = new ByteSizeValue(firstRandom, unit); + ByteSizeValue secondByteValue = new ByteSizeValue(secondRandom, unit); + assertEquals(firstRandom > secondRandom, firstByteValue.compareTo(secondByteValue) > 0); + assertEquals(secondRandom > firstRandom, secondByteValue.compareTo(firstByteValue) > 0); + } + + public void testCompareUnits() { + long number = randomPositiveLong(); + ByteSizeUnit randomUnit = randomValueOtherThan(ByteSizeUnit.PB, ()->randomFrom(ByteSizeUnit.values())); + ByteSizeValue firstByteValue = new ByteSizeValue(number, randomUnit); + ByteSizeValue secondByteValue = new ByteSizeValue(number, ByteSizeUnit.PB); + assertTrue(firstByteValue.compareTo(secondByteValue) < 0); + assertTrue(secondByteValue.compareTo(firstByteValue) > 0); + } + + public void testEdgeCompare() { + ByteSizeValue maxLongValuePB = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.PB); + ByteSizeValue maxLongValueB = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + assertTrue(maxLongValuePB.compareTo(maxLongValueB) > 0); + } + + public void testConversionHashCode() { + ByteSizeValue firstValue = new ByteSizeValue(randomIntBetween(0, Integer.MAX_VALUE), ByteSizeUnit.GB); + ByteSizeValue secondValue = new ByteSizeValue(firstValue.getBytes(), ByteSizeUnit.BYTES); + assertEquals(firstValue.hashCode(), secondValue.hashCode()); } public void testSerialization() throws IOException { @@ -176,7 +194,7 @@ public class ByteSizeValueTests extends ESTestCase { byteSizeValue.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { ByteSizeValue deserializedByteSizeValue = new ByteSizeValue(in); - assertEquals(byteSizeValue, deserializedByteSizeValue); + assertEquals(byteSizeValue.getBytes(), deserializedByteSizeValue.getBytes()); } } } diff --git a/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java index 7c5463baed2..3d8ecdba424 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java @@ -28,9 +28,6 @@ import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class DistanceUnitTests extends ESTestCase { public void testSimpleDistanceUnit() { assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.MILES), closeTo(16.09344, 0.001)); diff --git a/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java index b5fc54de7d0..3a97a11308b 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java @@ -23,9 +23,6 @@ import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -/** - * - */ public class SizeValueTests extends ESTestCase { public void testThatConversionWorks() { SizeValue sizeValue = new SizeValue(1000); @@ -67,4 +64,37 @@ public class SizeValueTests extends ESTestCase { assertThat(e.getMessage(), containsString("may not be negative")); } } + + public void testCompareEquality() { + long randomValue = randomPositiveLong(); + SizeUnit randomUnit = randomFrom(SizeUnit.values()); + SizeValue firstValue = new SizeValue(randomValue, randomUnit); + SizeValue secondValue = new SizeValue(randomValue, randomUnit); + assertEquals(0, firstValue.compareTo(secondValue)); + } + + public void testCompareValue() { + long firstRandom = randomPositiveLong(); + long secondRandom = randomValueOtherThan(firstRandom, ESTestCase::randomPositiveLong); + SizeUnit unit = randomFrom(SizeUnit.values()); + SizeValue firstSizeValue = new SizeValue(firstRandom, unit); + SizeValue secondSizeValue = new SizeValue(secondRandom, unit); + assertEquals(firstRandom > secondRandom, firstSizeValue.compareTo(secondSizeValue) > 0); + assertEquals(secondRandom > firstRandom, secondSizeValue.compareTo(firstSizeValue) > 0); + } + + public void testCompareUnits() { + long number = randomPositiveLong(); + SizeUnit randomUnit = randomValueOtherThan(SizeUnit.PETA, ()->randomFrom(SizeUnit.values())); + SizeValue firstValue = new SizeValue(number, randomUnit); + SizeValue secondValue = new SizeValue(number, SizeUnit.PETA); + assertTrue(firstValue.compareTo(secondValue) < 0); + assertTrue(secondValue.compareTo(firstValue) > 0); + } + + public void testConversionHashCode() { + SizeValue firstValue = new SizeValue(randomIntBetween(0, Integer.MAX_VALUE), SizeUnit.GIGA); + SizeValue secondValue = new SizeValue(firstValue.getSingles(), SizeUnit.SINGLE); + assertEquals(firstValue.hashCode(), secondValue.hashCode()); + } } diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index 4d0ac5257a3..1f7e876f856 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -224,4 +224,37 @@ public class TimeValueTests extends ESTestCase { assertEquals("36h", new TimeValue(36, TimeUnit.HOURS).getStringRep()); assertEquals("1000d", new TimeValue(1000, TimeUnit.DAYS).getStringRep()); } + + public void testCompareEquality() { + long randomLong = randomPositiveLong(); + TimeUnit randomUnit = randomFrom(TimeUnit.values()); + TimeValue firstValue = new TimeValue(randomLong, randomUnit); + TimeValue secondValue = new 
TimeValue(randomLong, randomUnit); + assertEquals(0, firstValue.compareTo(secondValue)); + } + + public void testCompareValue() { + long firstRandom = randomPositiveLong(); + long secondRandom = randomValueOtherThan(firstRandom, ESTestCase::randomPositiveLong); + TimeUnit unit = randomFrom(TimeUnit.values()); + TimeValue firstValue = new TimeValue(firstRandom, unit); + TimeValue secondValue = new TimeValue(secondRandom, unit); + assertEquals(firstRandom > secondRandom, firstValue.compareTo(secondValue) > 0); + assertEquals(secondRandom > firstRandom, secondValue.compareTo(firstValue) > 0); + } + + public void testCompareUnits() { + long number = randomPositiveLong(); + TimeUnit randomUnit = randomValueOtherThan(TimeUnit.DAYS, ()->randomFrom(TimeUnit.values())); + TimeValue firstValue = new TimeValue(number, randomUnit); + TimeValue secondValue = new TimeValue(number, TimeUnit.DAYS); + assertTrue(firstValue.compareTo(secondValue) < 0); + assertTrue(secondValue.compareTo(firstValue) > 0); + } + + public void testConversionHashCode() { + TimeValue firstValue = new TimeValue(randomIntBetween(0, Integer.MAX_VALUE), TimeUnit.MINUTES); + TimeValue secondValue = new TimeValue(firstValue.getSeconds(), TimeUnit.SECONDS); + assertEquals(firstValue.hashCode(), secondValue.hashCode()); + } } diff --git a/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java index 172a064b698..8a346ed4fbe 100644 --- a/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java @@ -27,9 +27,6 @@ import java.util.BitSet; import static org.hamcrest.Matchers.is; -/** - * - */ public class ArrayUtilsTests extends ESTestCase { public void testBinarySearch() throws Exception { for (int j = 0; j < 100; j++) { diff --git a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 6fadb6e5506..301f48f9b04 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -27,14 +27,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.ESTestCase; import org.junit.Before; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.Arrays; -public class BigArraysTests extends ESSingleNodeTestCase { +public class BigArraysTests extends ESTestCase { private BigArrays randombigArrays() { return new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); diff --git a/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java b/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java deleted file mode 100644 index 8fabbcc60ae..00000000000 --- a/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
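The new compareTo tests for ByteSizeValue, SizeValue, and TimeValue all pin down the same contract: equal value and unit compare as zero, and ordering must hold across units, including the edge case where Long.MAX_VALUE petabytes must still outrank Long.MAX_VALUE bytes even though the byte count overflows a long (testEdgeCompare). One overflow-safe way to satisfy that contract is to compare in a wider numeric space; a sketch under that assumption, not the ES implementation:

import java.math.BigInteger;

final class UnitCompareSketch {
    enum Unit {
        BYTES(1L), KB(1024L), MB(1024L * 1024), GB(1024L * 1024 * 1024),
        TB(1024L * 1024 * 1024 * 1024), PB(1024L * 1024 * 1024 * 1024 * 1024);
        final long bytes;
        Unit(long bytes) { this.bytes = bytes; }
    }

    // Comparing via BigInteger avoids long overflow when size * unit.bytes
    // exceeds Long.MAX_VALUE.
    static int compare(long size1, Unit u1, long size2, Unit u2) {
        BigInteger total1 = BigInteger.valueOf(size1).multiply(BigInteger.valueOf(u1.bytes));
        BigInteger total2 = BigInteger.valueOf(size2).multiply(BigInteger.valueOf(u2.bytes));
        return total1.compareTo(total2);
    }

    public static void main(String[] args) {
        assert compare(Long.MAX_VALUE, Unit.PB, Long.MAX_VALUE, Unit.BYTES) > 0;
        assert compare(1, Unit.GB, 1024, Unit.MB) == 0;
    }
}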
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.util; - -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.test.ESTestCase; - -public class ExtensionPointTests extends ESTestCase { - - public void testClassSet() { - final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("test_class", TestBaseClass.class, Consumer.class); - allocationDeciders.registerExtension(TestImpl.class); - Injector injector = new ModulesBuilder().add(new Module() { - @Override - public void configure(Binder binder) { - allocationDeciders.bind(binder); - } - }).createInjector(); - assertEquals(1, TestImpl.instances.get()); - - } - - public static class TestBaseClass {} - - public static class Consumer { - @Inject - public Consumer(Set deciders, TestImpl other) { - // we require the TestImpl more than once to ensure it's bound as a singleton - } - } - - public static class TestImpl extends TestBaseClass { - static final AtomicInteger instances = new AtomicInteger(0); - - @Inject - public TestImpl() { - instances.incrementAndGet(); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index 94fc0d88752..916926e36a4 100644 --- a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -211,11 +211,11 @@ public class IndexFolderUpgraderTests extends ESTestCase { throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length); } // the bwc scripts packs the indices under this path - Path src = list[0].resolve("nodes/0/indices/" + indexName); + Path src = OldIndexUtils.getIndexDir(logger, indexName, path.getFileName().toString(), list[0]); assertTrue("[" + path + "] missing index dir: " + src.toString(), Files.exists(src)); final Path indicesPath = randomFrom(nodeEnvironment.nodePaths()).indicesPath; logger.info("--> injecting index [{}] into [{}]", indexName, indicesPath); - OldIndexUtils.copyIndex(logger, src, indexName, indicesPath); + OldIndexUtils.copyIndex(logger, src, src.getFileName().toString(), indicesPath); IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment); // ensure old index folder is deleted diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java index c5d0ec4257e..ed4c7c9f7bb 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java @@ -30,8 +30,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -/** - */ public class RefCountedTests extends ESTestCase { public void testRefCount() throws IOException { MyRefCounted counted = new MyRefCounted(); diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index d402f09f07d..3f914f61d48 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -319,6 +319,9 @@ public class ThreadContextTests extends ESTestCase { // But we do inside of it withContext.run(); + + // but not after + assertNull(threadContext.getHeader("foo")); } } @@ -350,6 +353,177 @@ public class ThreadContextTests extends ESTestCase { } } + public void testPreservesThreadsOriginalContextOnRunException() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + Runnable withContext; + + // create an abstract runnable, add headers and transient objects, and verify them in the callback methods + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("foo", "bar_transient"); + withContext = threadContext.preserveContext(new AbstractRunnable() { + + @Override + public void onAfter() { + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("bar_transient", threadContext.getTransient("foo")); + assertNotNull(threadContext.getTransient("failure")); + assertEquals("exception from doRun", ((RuntimeException)threadContext.getTransient("failure")).getMessage()); + assertFalse(threadContext.isDefaultContext()); + threadContext.putTransient("after", "after"); + } + + @Override + public void onFailure(Exception e) { + assertEquals("exception from doRun", e.getMessage()); + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("bar_transient", threadContext.getTransient("foo")); + assertFalse(threadContext.isDefaultContext()); + threadContext.putTransient("failure", e); + } + + @Override + protected void doRun() throws Exception { + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("bar_transient", threadContext.getTransient("foo")); + assertFalse(threadContext.isDefaultContext()); + throw new RuntimeException("exception from doRun"); + } + }); + } + + // We don't see the header outside of the runnable + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertNull(threadContext.getTransient("failure")); + assertNull(threadContext.getTransient("after")); + assertTrue(threadContext.isDefaultContext()); + + // But we do inside of it + withContext.run(); + + // verify not seen after + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertNull(threadContext.getTransient("failure")); + assertNull(threadContext.getTransient("after")); + assertTrue(threadContext.isDefaultContext()); + + // repeat with a regular runnable + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("foo", "bar_transient"); + withContext = threadContext.preserveContext(() -> { + assertEquals("bar", 
threadContext.getHeader("foo")); + assertEquals("bar_transient", threadContext.getTransient("foo")); + assertFalse(threadContext.isDefaultContext()); + threadContext.putTransient("run", true); + throw new RuntimeException("exception from run"); + }); + } + + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertNull(threadContext.getTransient("run")); + assertTrue(threadContext.isDefaultContext()); + + final Runnable runnable = withContext; + RuntimeException e = expectThrows(RuntimeException.class, runnable::run); + assertEquals("exception from run", e.getMessage()); + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertNull(threadContext.getTransient("run")); + assertTrue(threadContext.isDefaultContext()); + } + } + + public void testPreservesThreadsOriginalContextOnFailureException() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + Runnable withContext; + + // a runnable that throws from onFailure + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("foo", "bar_transient"); + withContext = threadContext.preserveContext(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + throw new RuntimeException("from onFailure", e); + } + + @Override + protected void doRun() throws Exception { + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("bar_transient", threadContext.getTransient("foo")); + assertFalse(threadContext.isDefaultContext()); + throw new RuntimeException("from doRun"); + } + }); + } + + // We don't see the header outside of the runnable + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertTrue(threadContext.isDefaultContext()); + + // But we do inside of it + RuntimeException e = expectThrows(RuntimeException.class, withContext::run); + assertEquals("from onFailure", e.getMessage()); + assertEquals("from doRun", e.getCause().getMessage()); + + // but not after + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertTrue(threadContext.isDefaultContext()); + } + } + + public void testPreservesThreadsOriginalContextOnAfterException() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + Runnable withContext; + + // a runnable that throws from onAfter + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("foo", "bar_transient"); + withContext = threadContext.preserveContext(new AbstractRunnable() { + + @Override + public void onAfter() { + throw new RuntimeException("from onAfter"); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException("from onFailure", e); + } + + @Override + protected void doRun() throws Exception { + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("bar_transient", threadContext.getTransient("foo")); + assertFalse(threadContext.isDefaultContext()); + } + }); + } + + // We don't see the header outside of the runnable + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertTrue(threadContext.isDefaultContext()); + + // But we do inside of it + RuntimeException e = expectThrows(RuntimeException.class, withContext::run); + assertEquals("from onAfter", e.getMessage()); + 
assertNull(e.getCause()); + + // but not after + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("foo")); + assertTrue(threadContext.isDefaultContext()); + } + } + /** * Sometimes wraps a Runnable in an AbstractRunnable. */ diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 227918ff971..e1311483777 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -47,15 +47,18 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.math.BigInteger; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -846,6 +849,140 @@ public abstract class BaseXContentTestCase extends ESTestCase { XContentBuilder.ensureNotNull("foo", "No exception must be thrown"); } + public void testEnsureNoSelfReferences() throws IOException { + XContentBuilder.ensureNoSelfReferences(emptyMap()); + XContentBuilder.ensureNoSelfReferences(null); + + Map map = new HashMap<>(); + map.put("field", map); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map)); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + + /** + * Test that the same map written multiple times does not trigger the self-reference check in + * {@link XContentBuilder#ensureNoSelfReferences(Object)} + */ + public void testRepeatedMapsAndNoSelfReferences() throws Exception { + Map mapB = singletonMap("b", "B"); + Map mapC = singletonMap("c", "C"); + Map mapD = singletonMap("d", "D"); + Map mapA = new HashMap<>(); + mapA.put("a", 0); + mapA.put("b1", mapB); + mapA.put("b2", mapB); + mapA.put("c", Arrays.asList(mapC, mapC)); + mapA.put("d1", mapD); + mapA.put("d2", singletonMap("d3", mapD)); + + final String expected = + "{'map':{'b2':{'b':'B'},'a':0,'c':[{'c':'C'},{'c':'C'}],'d1':{'d':'D'},'d2':{'d3':{'d':'D'}},'b1':{'b':'B'}}}"; + + assertResult(expected, () -> builder().startObject().field("map", mapA).endObject()); + assertResult(expected, () -> builder().startObject().field("map").value(mapA).endObject()); + assertResult(expected, () -> builder().startObject().field("map").map(mapA).endObject()); + } + + public void testSelfReferencingMapsOneLevel() throws IOException { + Map map0 = new HashMap<>(); + Map map1 = new HashMap<>(); + + map0.put("foo", 0); + map0.put("map1", map1); // map 0 -> map 1 + + map1.put("bar", 1); + map1.put("map0", map0); // map 1 -> map 0 loop + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + + public void testSelfReferencingMapsTwoLevels() throws IOException { + Map map0 = new HashMap<>(); + Map map1 = new HashMap<>(); + Map map2 = new HashMap<>(); + + map0.put("foo", 0); + map0.put("map1", map1); // map 0 -> map 1 + + 
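The three testPreservesThreadsOriginalContext* tests above assert one invariant from different failure paths: a Runnable wrapped by preserveContext runs under the captured context, and the caller's context is restored afterwards even when doRun, onFailure, or onAfter throws. The essence is a capture at wrap time plus a restore in a finally block; a minimal sketch using a plain ThreadLocal as a stand-in for Elasticsearch's ThreadContext:

import java.util.HashMap;
import java.util.Map;

final class ContextPreservingSketch {
    static final ThreadLocal<Map<String, String>> CTX =
        ThreadLocal.withInitial(HashMap::new);

    // Capture the caller's context now; install it around run(), and put the
    // original context back in a finally block so exceptions cannot leak state.
    static Runnable preserveContext(Runnable inner) {
        final Map<String, String> captured = new HashMap<>(CTX.get());
        return () -> {
            final Map<String, String> original = CTX.get();
            CTX.set(captured);
            try {
                inner.run();
            } finally {
                CTX.set(original);   // restored even if inner.run() throws
            }
        };
    }

    public static void main(String[] args) {
        CTX.get().put("foo", "bar");
        Runnable withContext = preserveContext(() -> {
            assert "bar".equals(CTX.get().get("foo"));   // captured context visible inside
        });
        CTX.set(new HashMap<>());                        // caller moves to a default context
        withContext.run();
        assert CTX.get().get("foo") == null;             // caller's context untouched after
    }
}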
map1.put("bar", 1); + map1.put("map2", map2); // map 1 -> map 2 + + map2.put("baz", 2); + map2.put("map0", map0); // map 2 -> map 0 loop + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + + public void testSelfReferencingObjectsArray() throws IOException { + Object[] values = new Object[3]; + values[0] = 0; + values[1] = 1; + values[2] = values; + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder() + .startObject() + .field("field", values) + .endObject()); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + + e = expectThrows(IllegalArgumentException.class, () -> builder() + .startObject() + .array("field", values) + .endObject()); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + + public void testSelfReferencingIterable() throws IOException { + List values = new ArrayList<>(); + values.add("foo"); + values.add("bar"); + values.add(values); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder() + .startObject() + .field("field", (Iterable) values) + .endObject()); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + + public void testSelfReferencingIterableOneLevel() throws IOException { + Map map = new HashMap<>(); + map.put("foo", 0); + map.put("bar", 1); + + Iterable values = Arrays.asList("one", "two", map); + map.put("baz", values); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder() + .startObject() + .field("field", (Iterable) values) + .endObject()); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + + public void testSelfReferencingIterableTwoLevels() throws IOException { + Map map0 = new HashMap<>(); + Map map1 = new HashMap<>(); + Map map2 = new HashMap<>(); + + List it1 = new ArrayList<>(); + + map0.put("foo", 0); + map0.put("it1", (Iterable) it1); // map 0 -> it1 + + it1.add(map1); + it1.add(map2); // it 1 -> map 1, map 2 + + map2.put("baz", 2); + map2.put("map0", map0); // map 2 -> map 0 loop + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); + assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + } + private static void expectUnclosedException(ThrowingRunnable runnable) { IllegalStateException e = expectThrows(IllegalStateException.class, runnable); assertThat(e.getMessage(), containsString("Failed to close the XContentBuilder")); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 2cc4889be9d..733d3d1775d 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -113,6 +115,82 @@ public class ObjectParserTests 
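The testSelfReferencing* cases probe cycles through maps, object arrays, and iterables at several depths, while testRepeatedMapsAndNoSelfReferences insists that merely reusing the same map as a sibling stays legal. That combination points at identity-based ancestor tracking during traversal; a sketch of that approach (a hypothetical standalone checker, not XContentBuilder's internals):

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;

final class SelfReferenceCheckSketch {
    static void ensureNoSelfReferences(Object value) {
        ensureNoSelfReferences(value, Collections.newSetFromMap(new IdentityHashMap<>()));
    }

    private static void ensureNoSelfReferences(Object value, Set<Object> ancestors) {
        Iterable<?> children = null;
        if (value instanceof Map) {
            children = ((Map<?, ?>) value).values();
        } else if (value instanceof Iterable) {
            children = (Iterable<?>) value;
        } else if (value instanceof Object[]) {
            children = java.util.Arrays.asList((Object[]) value);
        }
        if (children != null) {
            // Identity (not equals) matters: the same container may appear as
            // a sibling many times, but never as its own ancestor.
            if (ancestors.add(value) == false) {
                throw new IllegalArgumentException(
                    "Object has already been built and is self-referencing itself");
            }
            for (Object child : children) {
                ensureNoSelfReferences(child, ancestors);
            }
            ancestors.remove(value);   // siblings may legally reuse this container
        }
    }

    public static void main(String[] args) {
        Map<String, Object> map = new java.util.HashMap<>();
        map.put("field", map);   // one-level loop, as in testEnsureNoSelfReferences
        try {
            ensureNoSelfReferences(map);
            throw new AssertionError("expected failure");
        } catch (IllegalArgumentException expected) { }
    }
}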
extends ESTestCase { } + /** + * This test ensures we can use a classic pull-parsing parser + * together with the object parser + */ + public void testUseClassicPullParsingSubParser() throws IOException { + class ClassicParser { + URI parseURI(XContentParser parser) throws IOException { + String fieldName = null; + String host = ""; + int port = 0; + XContentParser.Token token; + while (( token = parser.currentToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING){ + if (fieldName.equals("host")) { + host = parser.text(); + } else { + throw new IllegalStateException("boom"); + } + } else if (token == XContentParser.Token.VALUE_NUMBER){ + if (fieldName.equals("port")) { + port = parser.intValue(); + } else { + throw new IllegalStateException("boom"); + } + } + parser.nextToken(); + } + return URI.create(host + ":" + port); + } + } + class Foo { + public String name; + public URI uri; + public void setName(String name) { + this.name = name; + } + + public void setURI(URI uri) { + this.uri = uri; + } + } + + class CustomParseFieldMatchSupplier implements ParseFieldMatcherSupplier { + + public final ClassicParser parser; + + CustomParseFieldMatchSupplier(ClassicParser parser) { + this.parser = parser; + } + + @Override + public ParseFieldMatcher getParseFieldMatcher() { + return ParseFieldMatcher.EMPTY; + } + + public URI parseURI(XContentParser parser) { + try { + return this.parser.parseURI(parser); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + XContentParser parser = XContentType.JSON.xContent() + .createParser("{\"url\" : { \"host\": \"http://foobar\", \"port\" : 80}, \"name\" : \"foobarbaz\"}"); + ObjectParser objectParser = new ObjectParser<>("foo"); + objectParser.declareString(Foo::setName, new ParseField("name")); + objectParser.declareObjectOrDefault(Foo::setURI, (p, s) -> s.parseURI(p), () -> null, new ParseField("url")); + Foo s = objectParser.parse(parser, new Foo(), new CustomParseFieldMatchSupplier(new ClassicParser())); + assertEquals(s.uri.getHost(), "foobar"); + assertEquals(s.uri.getPort(), 80); + assertEquals(s.name, "foobarbaz"); + } + public void testExceptions() throws IOException { XContentParser parser = XContentType.JSON.xContent().createParser("{\"test\" : \"foo\"}"); class TestStruct { diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java index 8319873878a..8a3d0ef9ccf 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java @@ -30,9 +30,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class XContentFactoryTests extends ESTestCase { public void testGuessJson() throws IOException { testGuessType(XContentType.JSON); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index cce349f417c..cbcff431c2b 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.List; +import java.util.Map; 
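testUseClassicPullParsingSubParser above drives the token stream by hand from inside an ObjectParser callback. Since XContentType.JSON is backed by Jackson, the same pull-parsing loop can be shown self-contained against Jackson's JsonParser; a sketch mirroring the ClassicParser logic (illustrative, not ES code):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import java.io.IOException;
import java.net.URI;

final class PullParsingSketch {
    // Walk tokens until the enclosing object ends, picking out the two
    // fields we care about, exactly like the ClassicParser in the test.
    static URI parseURI(JsonParser parser) throws IOException {
        String field = null;
        String host = "";
        int port = 0;
        for (JsonToken token = parser.nextToken(); token != JsonToken.END_OBJECT; token = parser.nextToken()) {
            if (token == JsonToken.FIELD_NAME) {
                field = parser.getCurrentName();
            } else if (token == JsonToken.VALUE_STRING && "host".equals(field)) {
                host = parser.getText();
            } else if (token == JsonToken.VALUE_NUMBER_INT && "port".equals(field)) {
                port = parser.getIntValue();
            }
        }
        return URI.create(host + ":" + port);
    }

    public static void main(String[] args) throws IOException {
        JsonParser p = new JsonFactory().createParser("{\"host\": \"http://foobar\", \"port\": 80}");
        p.nextToken();   // advance onto START_OBJECT before delegating
        URI uri = parseURI(p);
        assert "foobar".equals(uri.getHost()) && uri.getPort() == 80;
    }
}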
import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -75,4 +76,30 @@ public class XContentParserTests extends ESTestCase { assertThat(e.getMessage(), containsString("Failed to parse list")); } } + + public void testReadMapStrings() throws IOException { + Map map = readMapStrings("{\"foo\": {\"kbar\":\"vbar\"}}"); + assertThat(map.get("kbar"), equalTo("vbar")); + assertThat(map.size(), equalTo(1)); + map = readMapStrings("{\"foo\": {\"kbar\":\"vbar\", \"kbaz\":\"vbaz\"}}"); + assertThat(map.get("kbar"), equalTo("vbar")); + assertThat(map.get("kbaz"), equalTo("vbaz")); + assertThat(map.size(), equalTo(2)); + map = readMapStrings("{\"foo\": {}}"); + assertThat(map.size(), equalTo(0)); + } + + @SuppressWarnings("unchecked") + private static Map readMapStrings(String source) throws IOException { + try (XContentParser parser = XContentType.JSON.xContent().createParser(source)) { + XContentParser.Token token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), equalTo("foo")); + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + return randomBoolean() ? parser.mapStringsOrdered() : parser.mapStrings(); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java index fb726b97e3e..8852f090ef6 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java @@ -31,9 +31,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class JsonVsCborTests extends ESTestCase { public void testCompareParsingTokens() throws IOException { BytesStreamOutput xsonOs = new BytesStreamOutput(); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java index ecf49be6629..9f2910bc11f 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java @@ -31,9 +31,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class JsonVsSmileTests extends ESTestCase { public void testCompareParsingTokens() throws IOException { BytesStreamOutput xsonOs = new BytesStreamOutput(); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java index ba2043bbe20..7e757c0bbc4 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java @@ -44,8 +44,6 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; -/** - */ public class XContentMapValuesTests extends ESTestCase { public void testFilter() throws Exception { XContentBuilder builder = XContentFactory.jsonBuilder().startObject() @@ -551,4 
+549,43 @@ public class XContentMapValuesTests extends ESTestCase { parser.list()); } } + + public void testDotsInFieldNames() { + Map map = new HashMap<>(); + map.put("foo.bar", 2); + Map sub = new HashMap<>(); + sub.put("baz", 3); + map.put("foo", sub); + map.put("quux", 5); + + // dots in field names in includes + Map filtered = XContentMapValues.filter(map, new String[] {"foo"}, new String[0]); + Map expected = new HashMap<>(map); + expected.remove("quux"); + assertEquals(expected, filtered); + + // dots in field names in excludes + filtered = XContentMapValues.filter(map, new String[0], new String[] {"foo"}); + expected = new HashMap<>(map); + expected.keySet().retainAll(Collections.singleton("quux")); + assertEquals(expected, filtered); + } + + public void testSupplementaryCharactersInPaths() { + Map map = new HashMap<>(); + map.put("搜索", 2); + map.put("指数", 3); + + assertEquals(Collections.singletonMap("搜索", 2), XContentMapValues.filter(map, new String[] {"搜索"}, new String[0])); + assertEquals(Collections.singletonMap("指数", 3), XContentMapValues.filter(map, new String[0], new String[] {"搜索"})); + } + + public void testSharedPrefixes() { + Map map = new HashMap<>(); + map.put("foobar", 2); + map.put("foobaz", 3); + + assertEquals(Collections.singletonMap("foobar", 2), XContentMapValues.filter(map, new String[] {"foobar"}, new String[0])); + assertEquals(Collections.singletonMap("foobaz", 3), XContentMapValues.filter(map, new String[0], new String[] {"foobar"})); + } } diff --git a/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java b/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java index c25a0a6503b..4c84d7c6722 100644 --- a/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java +++ b/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java @@ -30,9 +30,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class JacksonLocationTests extends ESTestCase { public void testLocationExtraction() throws IOException { // { diff --git a/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java b/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java index 442a566a77e..9ed433918d1 100644 --- a/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java +++ b/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java @@ -42,9 +42,6 @@ import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -/** - * - */ public class SimpleJodaTests extends ESTestCase { public void testMultiParsers() { DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java index 1455b397e74..2b08213ab43 100644 --- a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java @@ -60,9 +60,6 @@ import java.util.ArrayList; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class SimpleLuceneTests extends ESTestCase { public void testSortValues() throws Exception { Directory dir = new RAMDirectory(); diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index fbb5115903c..5e02da294c8 100644 --- 
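testSharedPrefixes and testDotsInFieldNames pin down a subtle rule of XContentMapValues.filter: patterns select whole path elements, so an include of "foobar" must not capture "foobaz" merely because they share a prefix. A toy filter showing the whole-key half of that contract (hypothetical helper, far simpler than the real path-aware implementation):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class FilterSketch {
    static Map<String, Object> filter(Map<String, Object> map, String[] includes, String[] excludes) {
        Map<String, Object> result = new HashMap<>();
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            boolean included = includes.length == 0 || matchesAny(entry.getKey(), includes);
            boolean excluded = matchesAny(entry.getKey(), excludes);
            if (included && !excluded) {
                result.put(entry.getKey(), entry.getValue());
            }
        }
        return result;
    }

    // Whole-key match only; the real matcher also descends dotted sub-paths.
    private static boolean matchesAny(String key, String[] patterns) {
        for (String pattern : patterns) {
            if (key.equals(pattern)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Map<String, Object> map = new HashMap<>();
        map.put("foobar", 2);
        map.put("foobaz", 3);
        Map<String, Object> filtered = filter(map, new String[] {"foobar"}, new String[0]);
        assert filtered.keySet().equals(Collections.singleton("foobar"));
    }
}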
a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -44,9 +44,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class VectorHighlighterTests extends ESTestCase { public void testVectorHighlighter() throws Exception { Directory dir = new RAMDirectory(); diff --git a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java index 481d15020fc..264e6ed0289 100644 --- a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.ESTestCase; @@ -77,7 +76,7 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase int nodeCount = scaledRandomIntBetween(10, 20); DiscoveryNode[] allNodes = new DiscoveryNode[nodeCount]; for (int i = 0; i < nodeCount; i++) { - DiscoveryNode node = new DiscoveryNode("node_" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node = new DiscoveryNode("node_" + i, buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); allNodes[i] = node; } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 3b436f45410..a5262922efe 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -18,50 +18,135 @@ */ package org.elasticsearch.discovery; -import org.elasticsearch.common.inject.ModuleTestCase; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.zen.ElectMasterService; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.NoopDiscovery; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; -/** - */ -public class DiscoveryModuleTests extends ModuleTestCase { +public class DiscoveryModuleTests extends 
ESTestCase {

-    public static class DummyMasterElectionService extends ElectMasterService {
+    private TransportService transportService;
+    private ClusterService clusterService;

-        public DummyMasterElectionService(Settings settings) {
-            super(settings);
+    public interface DummyHostsProviderPlugin extends DiscoveryPlugin {
+        Map<String, Supplier<UnicastHostsProvider>> impl();
+        @Override
+        default Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService,
+                                                                                 NetworkService networkService) {
+            return impl();
         }
     }

-    public void testRegisterMasterElectionService() {
-        Settings settings = Settings.builder().put(DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING.getKey(), "custom").build();
-        DiscoveryModule module = new DiscoveryModule(settings);
-        module.addElectMasterService("custom", DummyMasterElectionService.class);
-        assertBinding(module, ElectMasterService.class, DummyMasterElectionService.class);
-        assertBinding(module, Discovery.class, ZenDiscovery.class);
+    public interface DummyDiscoveryPlugin extends DiscoveryPlugin {
+        Map<String, Supplier<Discovery>> impl();
+        @Override
+        default Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
+                                                                   ClusterService clusterService, ZenPing zenPing) {
+            return impl();
+        }
     }

-    public void testLoadUnregisteredMasterElectionService() {
-        Settings settings = Settings.builder().put(DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING.getKey(), "foobar").build();
-        DiscoveryModule module = new DiscoveryModule(settings);
-        module.addElectMasterService("custom", DummyMasterElectionService.class);
-        assertBindingFailure(module, "Unknown master service type [foobar]");
+    @Before
+    public void setupDummyServices() {
+        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, null, null);
+        clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null);
     }

-    public void testRegisterDefaults() {
-        Settings settings = Settings.EMPTY;
-        DiscoveryModule module = new DiscoveryModule(settings);
-        assertBinding(module, Discovery.class, ZenDiscovery.class);
+    @After
+    public void clearDummyServices() throws IOException {
+        IOUtils.close(transportService, clusterService);
+        transportService = null;
+        clusterService = null;
+    }
+
+    private DiscoveryModule newModule(Settings settings, Function<UnicastHostsProvider, ZenPing> createZenPing,
+                                      List<DiscoveryPlugin> plugins) {
+        return new DiscoveryModule(settings, null, transportService, null, clusterService, createZenPing, plugins);
+    }
+
+    public void testDefaults() {
+        DiscoveryModule module = newModule(Settings.EMPTY, hostsProvider -> null, Collections.emptyList());
+        assertTrue(module.getDiscovery() instanceof ZenDiscovery);
+    }
+
+    public void testLazyConstructionDiscovery() {
+        DummyDiscoveryPlugin plugin = () -> Collections.singletonMap("custom",
+            () -> { throw new AssertionError("created discovery type which was not selected"); });
+        newModule(Settings.EMPTY, hostsProvider -> null, Collections.singletonList(plugin));
     }

     public void testRegisterDiscovery() {
         Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "custom").build();
-        DiscoveryModule module = new DiscoveryModule(settings);
-        module.addDiscoveryType("custom", NoopDiscovery.class);
-        assertBinding(module, Discovery.class, NoopDiscovery.class);
+        DummyDiscoveryPlugin plugin = () -> Collections.singletonMap("custom", NoopDiscovery::new);
+        DiscoveryModule module = newModule(settings, hostsProvider -> null, Collections.singletonList(plugin));
+
assertTrue(module.getDiscovery() instanceof NoopDiscovery); } + public void testUnknownDiscovery() { + Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "dne").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + newModule(settings, hostsProvider -> null, Collections.emptyList())); + assertEquals("Unknown discovery type [dne]", e.getMessage()); + } + public void testDuplicateDiscovery() { + DummyDiscoveryPlugin plugin1 = () -> Collections.singletonMap("dup", () -> null); + DummyDiscoveryPlugin plugin2 = () -> Collections.singletonMap("dup", () -> null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + newModule(Settings.EMPTY, hostsProvider -> null, Arrays.asList(plugin1, plugin2))); + assertEquals("Cannot register discovery type [dup] twice", e.getMessage()); + } + + public void testHostsProvider() { + Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "custom").build(); + final UnicastHostsProvider provider = Collections::emptyList; + DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> provider); + newModule(settings, hostsProvider -> { + assertEquals(provider, hostsProvider); + return null; + }, Collections.singletonList(plugin)); + } + + public void testUnknownHostsProvider() { + Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "dne").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + newModule(settings, hostsProvider -> null, Collections.emptyList())); + assertEquals("Unknown zen hosts provider [dne]", e.getMessage()); + } + + public void testDuplicateHostsProvider() { + DummyHostsProviderPlugin plugin1 = () -> Collections.singletonMap("dup", () -> null); + DummyHostsProviderPlugin plugin2 = () -> Collections.singletonMap("dup", () -> null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + newModule(Settings.EMPTY, hostsProvider -> null, Arrays.asList(plugin1, plugin2))); + assertEquals("Cannot register zen hosts provider [dup] twice", e.getMessage()); + } + + public void testLazyConstructionHostsProvider() { + DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", + () -> { throw new AssertionError("created hosts provider which was not selected"); }); + newModule(Settings.EMPTY, hostsProvider -> null, Collections.singletonList(plugin)); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 764e363d4d6..684633dfabf 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -43,6 +43,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.ClusterStateStatus; +import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; @@ -50,13 +52,12 @@ import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.ElectMasterService; +import org.elasticsearch.discovery.zen.FaultDetection; +import org.elasticsearch.discovery.zen.MembershipAction; +import org.elasticsearch.discovery.zen.PublishClusterStateAction; +import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.fd.FaultDetection; -import org.elasticsearch.discovery.zen.membership.MembershipAction; -import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; -import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; import org.elasticsearch.monitor.jvm.HotThreads; @@ -121,7 +122,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) -@ESIntegTestCase.SuppressLocalMode @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { @@ -129,6 +129,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { private ClusterDiscoveryConfiguration discoveryConfig; + @Override + protected boolean addMockZenPings() { + return false; + } @Override protected Settings nodeSettings(int nodeOrdinal) { @@ -150,11 +154,31 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { return 1; } + private boolean disableBeforeIndexDeletion; + + @Before + public void setUp() throws Exception { + super.setUp(); + disableBeforeIndexDeletion = false; + } + @Override - protected void beforeIndexDeletion() { - // some test may leave operations in flight - // this is because the disruption schemes swallow requests by design - // as such, these operations will never be marked as finished + public void setDisruptionScheme(ServiceDisruptionScheme scheme) { + if (scheme instanceof NetworkDisruption && + ((NetworkDisruption) scheme).getNetworkLinkDisruptionType() instanceof NetworkUnresponsive) { + // the network unresponsive disruption may leave operations in flight + // this is because this disruption scheme swallows requests by design + // as such, these operations will never be marked as finished + disableBeforeIndexDeletion = true; + } + super.setDisruptionScheme(scheme); + } + + @Override + protected void beforeIndexDeletion() throws Exception { + if (disableBeforeIndexDeletion == false) { + super.beforeIndexDeletion(); + } } private List startCluster(int numberOfNodes) throws ExecutionException, InterruptedException { @@ -172,12 +196,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureStableCluster(numberOfNodes); // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results - for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) { - for (ZenPing zenPing : pingService.zenPings()) { - if (zenPing instanceof UnicastZenPing) { - ((UnicastZenPing) zenPing).clearTemporalResponses(); - } - } + ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + if (zenPing instanceof UnicastZenPing) { + 
((UnicastZenPing) zenPing).clearTemporalResponses(); } return nodes; } @@ -358,7 +379,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } if (!success) { fail("node [" + node + "] has no master or has blocks, despite of being on the right side of the partition. State dump:\n" - + nodeState.prettyPrint()); + + nodeState); } } @@ -445,13 +466,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { assertEquals("unequal node count", state.nodes().getSize(), nodeState.nodes().getSize()); assertEquals("different masters ", state.nodes().getMasterNodeId(), nodeState.nodes().getMasterNodeId()); assertEquals("different meta data version", state.metaData().version(), nodeState.metaData().version()); - if (!state.routingTable().prettyPrint().equals(nodeState.routingTable().prettyPrint())) { + if (!state.routingTable().toString().equals(nodeState.routingTable().toString())) { fail("different routing"); } } catch (AssertionError t) { fail("failed comparing cluster state: " + t.getMessage() + "\n" + - "--- cluster state of node [" + nodes.get(0) + "]: ---\n" + state.prettyPrint() + - "\n--- cluster state [" + node + "]: ---\n" + nodeState.prettyPrint()); + "--- cluster state of node [" + nodes.get(0) + "]: ---\n" + state + + "\n--- cluster state [" + node + "]: ---\n" + nodeState); } } @@ -464,7 +485,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates */ @TestLogging("_root:DEBUG,org.elasticsearch.action.index:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE,org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.indices.cluster:TRACE") + + "org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") public void testAckedIndexing() throws Exception { final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5; @@ -744,15 +765,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); // Make sure that the end state is consistent on all nodes: assertDiscoveryCompleted(nodes); - // Use assertBusy(...) because the unfrozen node may take a while to actually join the cluster. - // The assertDiscoveryCompleted(...) can't know if all nodes have the old master node in all of the local cluster states - assertBusy(new Runnable() { - @Override - public void run() { - assertMaster(newMasterNode, nodes); - } - }); - + assertMaster(newMasterNode, nodes); assertThat(masters.size(), equalTo(2)); for (Map.Entry>> entry : masters.entrySet()) { @@ -843,10 +856,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list // includes all the other nodes that have pinged it and the issue doesn't manifest - for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) { - for (ZenPing zenPing : pingService.zenPings()) { - ((UnicastZenPing) zenPing).clearTemporalResponses(); - } + ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + if (zenPing instanceof UnicastZenPing) { + ((UnicastZenPing) zenPing).clearTemporalResponses(); } // Simulate a network issue between the unlucky node and elected master node in both directions. 
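The hunk above and the one below make the same replacement twice: instead of looping over every node's ZenPingService, the test now looks up the single ZenPing instance that the test cluster wires per node. A minimal sketch of that lookup as a shared helper, under the assumption that it lives inside this ESIntegTestCase subclass (the helper name clearTemporalResponses is hypothetical; internalCluster(), ZenPing, and UnicastZenPing#clearTemporalResponses() all appear in the surrounding diff):

    // Hypothetical helper capturing the pattern the two hunks inline at each call site.
    private void clearTemporalResponses() {
        // Each test node now wires exactly one ZenPing, so a single lookup replaces
        // the old iteration over ZenPingService.zenPings().
        ZenPing zenPing = internalCluster().getInstance(ZenPing.class);
        if (zenPing instanceof UnicastZenPing) {
            // Only the unicast implementation accumulates temporal ping responses.
            ((UnicastZenPing) zenPing).clearTemporalResponses();
        }
    }

Extracting the lookup like this would keep the instanceof guard in one place; the diff instead inlines it at both call sites, preserving each test's original structure.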
@@ -881,10 +893,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list // includes all the other nodes that have pinged it and the issue doesn't manifest - for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) { - for (ZenPing zenPing : pingService.zenPings()) { - ((UnicastZenPing) zenPing).clearTemporalResponses(); - } + ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + if (zenPing instanceof UnicastZenPing) { + ((UnicastZenPing) zenPing).clearTemporalResponses(); } // Simulate a network issue between the unicast target node and the rest of the cluster @@ -990,7 +1001,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { String isolatedNode = randomBoolean() ? masterNode : nonMasterNode; TwoPartitions partitions = isolateNode(isolatedNode); - NetworkDisruption networkDisruption = addRandomDisruptionType(partitions); + // we cannot use the NetworkUnresponsive disruption type here as it will swallow the "shard failed" request, calling neither + // onSuccess nor onFailure on the provided listener. + NetworkLinkDisruptionType disruptionType = new NetworkDisconnect(); + NetworkDisruption networkDisruption = new NetworkDisruption(partitions, disruptionType); + setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); service.localShardFailed(failedShard, "simulated", new CorruptIndexException("simulated", (String) null), new @@ -1188,9 +1203,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Don't restart the master node until we know the index deletion has taken effect on master and the master eligible node. assertBusy(() -> { for (String masterNode : allMasterEligibleNodes) { - final ClusterState masterState = internalCluster().clusterService(masterNode).state(); - assertTrue("index not deleted on " + masterNode, masterState.metaData().hasIndex(idxName) == false && - masterState.status() == ClusterState.ClusterStateStatus.APPLIED); + final ClusterServiceState masterState = internalCluster().clusterService(masterNode).clusterServiceState(); + assertTrue("index not deleted on " + masterNode, masterState.getClusterState().metaData().hasIndex(idxName) == false && + masterState.getClusterStateStatus() == ClusterStateStatus.APPLIED); } }); internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK); @@ -1248,7 +1263,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { final ClusterState state = client().admin().cluster().prepareState().get().getState(); if (state.metaData().hasIndex("test") == false) { - fail("index 'test' was lost. current cluster state: " + state.prettyPrint()); + fail("index 'test' was lost. current cluster state: " + state); } } @@ -1345,14 +1360,16 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { }, 10, TimeUnit.SECONDS); } - private void assertMaster(String masterNode, List nodes) { - for (String node : nodes) { - ClusterState state = getNodeClusterState(node); - String failMsgSuffix = "cluster_state:\n" + state.prettyPrint(); - assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size())); - String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; - assertThat("wrong master on node [" + node + "]. 
" + failMsgSuffix, otherMasterNodeName, equalTo(masterNode)); - } + private void assertMaster(String masterNode, List nodes) throws Exception { + assertBusy(() -> { + for (String node : nodes) { + ClusterState state = getNodeClusterState(node); + String failMsgSuffix = "cluster_state:\n" + state; + assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size())); + String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; + assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode)); + } + }); } private void assertDiscoveryCompleted(List nodes) throws InterruptedException { diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index d51447c9298..f089f76dcff 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -29,22 +29,24 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.discovery.zen.fd.FaultDetection; -import org.elasticsearch.discovery.zen.fd.MasterFaultDetection; -import org.elasticsearch.discovery.zen.fd.NodesFaultDetection; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.discovery.zen.FaultDetection; +import org.elasticsearch.discovery.zen.MasterFaultDetection; +import org.elasticsearch.discovery.zen.NodesFaultDetection; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTcpTransport; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.local.LocalTransport; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.After; @@ -141,13 +143,9 @@ public class ZenFaultDetectionTests extends ESTestCase { // trace zenfd actions but keep the default otherwise .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), singleton(TransportLivenessAction.NAME)) .build(), - new LocalTransport(settings, threadPool, namedWriteableRegistry, circuitBreakerService) { - @Override - protected Version getVersion() { - return version; - } - }, - threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, circuitBreakerService, + namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version), + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); return transportService; diff --git 
a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java index 95ad650cd16..3af2e32eefa 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java @@ -35,7 +35,6 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode public class ZenUnicastDiscoveryIT extends ESIntegTestCase { private ClusterDiscoveryConfiguration discoveryConfig; @@ -67,7 +66,7 @@ public class ZenUnicastDiscoveryIT extends ESIntegTestCase { internalCluster().startNodesAsync(currentNumNodes - unicastHostOrdinals.length).get(); if (client().admin().cluster().prepareHealth().setWaitForNodes("" + currentNumNodes).get().isTimedOut()) { - logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint()); + logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState()); fail("timed out waiting for cluster to form with [" + currentNumNodes + "] nodes"); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index 737607df6be..0c4862bf32a 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.ElectMasterService.MasterCandidate; import org.elasticsearch.test.ESTestCase; @@ -50,7 +49,7 @@ public class ElectMasterServiceTests extends ESTestCase { if (randomBoolean()) { roles.add(DiscoveryNode.Role.MASTER); } - DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, Version.CURRENT); nodes.add(node); } @@ -65,7 +64,7 @@ public class ElectMasterServiceTests extends ESTestCase { for (int i = 0; i < count; i++) { Set roles = new HashSet<>(); roles.add(DiscoveryNode.Role.MASTER); - DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, Version.CURRENT); candidates.add(new MasterCandidate(node, randomBoolean() ? 
MasterCandidate.UNRECOVERED_CLUSTER_VERSION : randomPositiveLong())); } @@ -77,7 +76,7 @@ public class ElectMasterServiceTests extends ESTestCase { public void testSortByMasterLikelihood() { List nodes = generateRandomNodes(); - List sortedNodes = electMasterService().sortByMasterLikelihood(nodes); + List sortedNodes = ElectMasterService.sortByMasterLikelihood(nodes); assertEquals(nodes.size(), sortedNodes.size()); DiscoveryNode prevNode = sortedNodes.get(0); for (int i = 1; i < sortedNodes.size(); i++) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 907d3786992..95fcb88a7ea 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -38,12 +38,10 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -427,7 +425,7 @@ public class NodeJoinControllerTests extends ESTestCase { public void testNewClusterStateOnExistingNodeJoin() throws InterruptedException, ExecutionException { ClusterState state = clusterService.state(); final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); - final DiscoveryNode other_node = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), + final DiscoveryNode other_node = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); nodesBuilder.add(other_node); setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); @@ -557,7 +555,7 @@ public class NodeJoinControllerTests extends ESTestCase { final DiscoveryNode other_node = new DiscoveryNode( randomBoolean() ? existing.getName() : "other_name", existing.getId(), - randomBoolean() ? existing.getAddress() : LocalTransportAddress.buildUnique(), + randomBoolean() ? existing.getAddress() : buildNewFakeTransportAddress(), randomBoolean() ? existing.getAttributes() : Collections.singletonMap("attr", "other"), randomBoolean() ? existing.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), randomBoolean() ? 
existing.getVersion() : VersionUtils.randomVersion(random())); @@ -585,7 +583,7 @@ public class NodeJoinControllerTests extends ESTestCase { */ public void testElectionBasedOnConflictingNodes() throws InterruptedException, ExecutionException { final DiscoveryNode masterNode = clusterService.localNode(); - final DiscoveryNode otherNode = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(), + final DiscoveryNode otherNode = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); // simulate master going down with stale nodes in it's cluster state (for example when min master nodes is set to 2) // also add some shards to that node @@ -629,7 +627,7 @@ public class NodeJoinControllerTests extends ESTestCase { setState(clusterService, stateBuilder.build()); final DiscoveryNode restartedNode = new DiscoveryNode(otherNode.getId(), - randomBoolean() ? otherNode.getAddress() : LocalTransportAddress.buildUnique(), otherNode.getAttributes(), + randomBoolean() ? otherNode.getAddress() : buildNewFakeTransportAddress(), otherNode.getAttributes(), otherNode.getRoles(), Version.CURRENT); nodeJoinController.startElectionContext(); @@ -669,7 +667,7 @@ public class NodeJoinControllerTests extends ESTestCase { ClusterState state = clusterService.state(); final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); for (int i = 0;i< count;i++) { - final DiscoveryNode node = new DiscoveryNode("node_" + state.nodes().getSize() + i, LocalTransportAddress.buildUnique(), + final DiscoveryNode node = new DiscoveryNode("node_" + state.nodes().getSize() + i, buildNewFakeTransportAddress(), emptyMap(), new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), Version.CURRENT); nodesBuilder.add(node); } @@ -678,10 +676,10 @@ public class NodeJoinControllerTests extends ESTestCase { protected void assertNodesInCurrentState(List expectedNodes) { final ClusterState state = clusterService.state(); - logger.info("assert for [{}] in:\n{}", expectedNodes, state.prettyPrint()); + logger.info("assert for [{}] in:\n{}", expectedNodes, state); DiscoveryNodes discoveryNodes = state.nodes(); for (DiscoveryNode node : expectedNodes) { - assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.getId()), equalTo(node)); + assertThat("missing " + node + "\n" + discoveryNodes, discoveryNodes.get(node.getId()), equalTo(node)); } assertThat(discoveryNodes.getSize(), equalTo(expectedNodes.size())); } @@ -752,6 +750,6 @@ public class NodeJoinControllerTests extends ESTestCase { roles.add(DiscoveryNode.Role.MASTER); } final String prefix = master ? 
"master_" : "data_"; - return new DiscoveryNode(prefix + i, i + "", new LocalTransportAddress("test_" + i), emptyMap(), roles, Version.CURRENT); + return new DiscoveryNode(prefix + i, i + "", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java index 4492cdb52e3..d63ddfdaee6 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -177,7 +176,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { } private DiscoveryNode node(final int id) { - return new DiscoveryNode(Integer.toString(id), LocalTransportAddress.buildUnique(), Version.CURRENT); + return new DiscoveryNode(Integer.toString(id), buildNewFakeTransportAddress(), Version.CURRENT); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueueTests.java similarity index 98% rename from core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java rename to core/src/test/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueueTests.java index 9bb8bf801f1..b1faaba576b 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueueTests.java @@ -16,7 +16,8 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.discovery.zen.publish; + +package org.elasticsearch.discovery.zen; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -25,8 +26,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.discovery.zen.publish.PendingClusterStatesQueue.ClusterStateContext; +import org.elasticsearch.discovery.zen.PendingClusterStatesQueue; +import org.elasticsearch.discovery.zen.PendingClusterStatesQueue.ClusterStateContext; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -237,7 +238,7 @@ public class PendingClusterStatesQueueTests extends ESTestCase { ClusterState state = lastClusterStatePerMaster[masterIndex]; if (state == null) { state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode(masters[masterIndex], LocalTransportAddress.buildUnique(), + .add(new DiscoveryNode(masters[masterIndex], buildNewFakeTransportAddress(), emptyMap(), emptySet(),Version.CURRENT)).masterNodeId(masters[masterIndex]).build() ).build(); } else { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java similarity index 93% rename from core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java rename to core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java index 50ec06694fe..e1d2a226a02 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.discovery.zen.publish; +package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -41,7 +41,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; @@ -100,21 +99,25 @@ public class PublishClusterStateActionTests extends ESTestCase { private final Logger logger; - public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, Logger logger) { + public MockNode(DiscoveryNode discoveryNode, MockTransportService service, + @Nullable ClusterStateListener listener, Logger logger) { this.discoveryNode = discoveryNode; this.service = service; this.listener = listener; this.logger = logger; - this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); + this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder() + .add(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); } public MockNode setAsMaster() { - this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId())).build(); + this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .masterNodeId(discoveryNode.getId())).build(); return this; } public MockNode resetMasterId() { - this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(null)).build(); + this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .masterNodeId(null)).build(); return this; } @@ -126,7 +129,8 @@ public class PublishClusterStateActionTests extends ESTestCase { @Override public void onNewClusterState(String reason) { ClusterState newClusterState = action.pendingStatesQueue().getNextClusterStateToProcess(); - logger.debug("[{}] received version [{}], uuid [{}]", discoveryNode.getName(), newClusterState.version(), newClusterState.stateUUID()); + logger.debug("[{}] received version [{}], uuid [{}]", + discoveryNode.getName(), newClusterState.version(), newClusterState.stateUUID()); if (listener != null) { ClusterChangedEvent event = new ClusterChangedEvent("", newClusterState, clusterState); listener.clusterChanged(event); @@ -156,7 +160,8 @@ public class PublishClusterStateActionTests extends ESTestCase { ThreadPool threadPool, Logger logger, Map nodes) throws Exception { final Settings settings = Settings.builder() .put("name", name) - .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", + TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .put(basSettings) .build(); @@ -229,7 +234,7 @@ public class PublishClusterStateActionTests extends ESTestCase { } private static MockTransportService buildTransportService(Settings settings, ThreadPool threadPool) { - MockTransportService transportService = MockTransportService.local(settings, Version.CURRENT, 
threadPool); + MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null); transportService.start(); transportService.acceptIncomingRequests(); return transportService; @@ -268,7 +273,8 @@ public class PublishClusterStateActionTests extends ESTestCase { // cluster state update - add block previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder() + .addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); assertThat(nodeB.clusterState.blocks().global().size(), equalTo(1)); @@ -295,7 +301,8 @@ public class PublishClusterStateActionTests extends ESTestCase { // cluster state update 4 - update settings previousClusterState = clusterState; - MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(Settings.builder().put("foo", "bar").build()).build(); + MetaData metaData = MetaData.builder(clusterState.metaData()) + .transientSettings(Settings.builder().put("foo", "bar").build()).build(); clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); @@ -338,7 +345,8 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode nodeB = createMockNode("nodeB"); - // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + // Initial cluster state with both states - the second node still shouldn't + // get diff even though it's present in the previous cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); @@ -347,7 +355,8 @@ public class PublishClusterStateActionTests extends ESTestCase { // cluster state update - add block previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder() + .addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); } @@ -370,7 +379,8 @@ public class PublishClusterStateActionTests extends ESTestCase { }); // Initial cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); // cluster state update - add nodeB @@ -381,7 +391,8 @@ public class 
PublishClusterStateActionTests extends ESTestCase { // cluster state update - add block previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder() + .addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); } @@ -446,7 +457,8 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode nodeB = createMockNode("nodeB"); - // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + // Initial cluster state with both states - the second node still shouldn't get + // diff even though it's present in the previous cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); @@ -455,7 +467,8 @@ public class PublishClusterStateActionTests extends ESTestCase { // cluster state update - add block previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder() + .addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) { @Override @@ -603,7 +616,8 @@ public class PublishClusterStateActionTests extends ESTestCase { node.action.validateIncomingState(state, null); // now set a master node - node.clusterState = ClusterState.builder(node.clusterState).nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId("master")).build(); + node.clusterState = ClusterState.builder(node.clusterState) + .nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId("master")).build(); logger.info("--> testing rejection of another master"); try { node.action.validateIncomingState(state, node.clusterState); @@ -619,7 +633,8 @@ public class PublishClusterStateActionTests extends ESTestCase { logger.info("--> testing rejection of another cluster name"); try { - node.action.validateIncomingState(ClusterState.builder(new ClusterName(randomAsciiOfLength(10))).nodes(node.nodes()).build(), node.clusterState); + node.action.validateIncomingState(ClusterState.builder(new ClusterName(randomAsciiOfLength(10))) + .nodes(node.nodes()).build(), node.clusterState); fail("node accepted state with another cluster name"); } catch (IllegalStateException OK) { assertThat(OK.toString(), containsString("received state from a node that is not part of the cluster")); @@ -687,8 +702,8 @@ public class PublishClusterStateActionTests extends ESTestCase { logger.info("--> publishing states"); for (ClusterState state : states) { node.action.handleIncomingClusterStateRequest( - new BytesTransportRequest(PublishClusterStateAction.serializeFullClusterState(state, Version.CURRENT), Version.CURRENT), - channel); + new BytesTransportRequest(PublishClusterStateAction.serializeFullClusterState(state, Version.CURRENT), Version.CURRENT), + channel); 
assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); assertThat(channel.error.get(), nullValue()); channel.clear(); @@ -725,12 +740,14 @@ public class PublishClusterStateActionTests extends ESTestCase { */ public void testTimeoutOrCommit() throws Exception { Settings settings = Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout + // short but so we will sometime commit sometime timeout + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); MockNode master = createMockNode("master", settings, null); MockNode node = createMockNode("node", settings, null); ClusterState state = ClusterState.builder(master.clusterState) - .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); + .nodes(DiscoveryNodes.builder(master.clusterState.nodes()) + .add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); for (int i = 0; i < 10; i++) { state = ClusterState.builder(state).incrementVersion().build(); @@ -755,7 +772,8 @@ public class PublishClusterStateActionTests extends ESTestCase { private MetaData buildMetaDataForVersion(MetaData metaData, long version) { ImmutableOpenMap.Builder indices = ImmutableOpenMap.builder(metaData.indices()); - indices.put("test" + version, IndexMetaData.builder("test" + version).settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + indices.put("test" + version, IndexMetaData.builder("test" + version) + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) .numberOfShards((int) version).numberOfReplicas(0).build()); return MetaData.builder(metaData) .transientSettings(Settings.builder().put("test", version).build()) @@ -772,16 +790,19 @@ public class PublishClusterStateActionTests extends ESTestCase { assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version))); } - public void publishStateAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { + public void publishStateAndWait(PublishClusterStateAction action, ClusterState state, + ClusterState previousState) throws InterruptedException { publishState(action, state, previousState).await(1, TimeUnit.SECONDS); } - public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { + public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, + ClusterState previousState) throws InterruptedException { final int minimumMasterNodes = randomIntBetween(-1, state.nodes().getMasterNodes().size()); return publishState(action, state, previousState, minimumMasterNodes); } - public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, ClusterState previousState, int minMasterNodes) throws InterruptedException { + public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, + ClusterState previousState, int minMasterNodes) throws InterruptedException { AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1); ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState); action.publish(changedEvent, minMasterNodes, assertingAckListener); @@ -829,7 +850,8 @@ public 
class PublishClusterStateActionTests extends ESTestCase { void assertSameState(ClusterState actual, ClusterState expected) { assertThat(actual, notNullValue()); - final String reason = "\n--> actual ClusterState: " + actual.prettyPrint() + "\n--> expected ClusterState:" + expected.prettyPrint(); + final String reason = "\n--> actual ClusterState: " + actual + "\n" + + "--> expected ClusterState:" + expected; assertThat("unequal UUIDs" + reason, actual.stateUUID(), equalTo(expected.stateUUID())); assertThat("unequal versions" + reason, actual.version(), equalTo(expected.version())); } @@ -851,7 +873,9 @@ public class PublishClusterStateActionTests extends ESTestCase { AtomicBoolean timeoutOnCommit = new AtomicBoolean(); AtomicBoolean errorOnCommit = new AtomicBoolean(); - public MockPublishAction(Settings settings, TransportService transportService, Supplier clusterStateSupplier, NewPendingClusterStateListener listener, DiscoverySettings discoverySettings, ClusterName clusterName) { + public MockPublishAction(Settings settings, TransportService transportService, + Supplier clusterStateSupplier, NewPendingClusterStateListener listener, + DiscoverySettings discoverySettings, ClusterName clusterName) { super(settings, transportService, clusterStateSupplier, listener, discoverySettings, clusterName); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java similarity index 84% rename from core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java rename to core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index bdffb5f99d6..4294bdd3dd4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -17,8 +17,9 @@ * under the License. 
 */
-package org.elasticsearch.discovery.zen.ping.unicast;
+package org.elasticsearch.discovery.zen;
 
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -29,14 +30,10 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.discovery.zen.ElectMasterService;
-import org.elasticsearch.discovery.zen.ping.PingContextProvider;
-import org.elasticsearch.discovery.zen.ping.ZenPing;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
@@ -47,6 +44,7 @@ import org.elasticsearch.transport.TransportConnectionListener;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.TransportSettings;
 
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Collections;
@@ -60,7 +58,9 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 
 public class UnicastZenPingTests extends ESTestCase {
-    public void testSimplePings() throws InterruptedException {
+    private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList;
+
+    public void testSimplePings() throws IOException, InterruptedException {
         int startPort = 11000 + randomIntBetween(0, 1000);
         int endPort = startPort + 10;
         Settings settings = Settings.builder()
@@ -73,7 +73,6 @@ public class UnicastZenPingTests extends ESTestCase {
 
         ThreadPool threadPool = new TestThreadPool(getClass().getName());
         NetworkService networkService = new NetworkService(settings, Collections.emptyList());
-        ElectMasterService electMasterService = new ElectMasterService(settings);
 
         NetworkHandle handleA = startServices(settings, threadPool, networkService, "UZP_A", Version.CURRENT);
         NetworkHandle handleB = startServices(settings, threadPool, networkService, "UZP_B", Version.CURRENT);
@@ -95,8 +94,8 @@
             .build();
         Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();
 
-        UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, electMasterService, null);
-        zenPingA.setPingContextProvider(new PingContextProvider() {
+        UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);
+        zenPingA.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
@@ -107,10 +106,9 @@ public class UnicastZenPingTests extends ESTestCase {
                 return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
             }
         });
-        zenPingA.start();
 
-        UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, electMasterService, null);
-        zenPingB.setPingContextProvider(new PingContextProvider() {
+        UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);
+        zenPingB.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
@@ -121,16 +119,14 @@ public class UnicastZenPingTests extends ESTestCase {
                 return state;
             }
         });
-        zenPingB.start();
 
-        UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, electMasterService,
-            null) {
+        UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER) {
             @Override
             protected Version getVersion() {
                 return versionD;
             }
         };
-        zenPingC.setPingContextProvider(new PingContextProvider() {
+        zenPingC.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
@@ -141,10 +137,9 @@ public class UnicastZenPingTests extends ESTestCase {
                 return state;
             }
         });
-        zenPingC.start();
 
-        UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, electMasterService, null);
-        zenPingD.setPingContextProvider(new PingContextProvider() {
+        UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, EMPTY_HOSTS_PROVIDER);
+        zenPingD.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build();
@@ -155,7 +150,6 @@ public class UnicastZenPingTests extends ESTestCase {
                 return state;
             }
         });
-        zenPingD.start();
 
         try {
             logger.info("ping from UZP_A");
@@ -185,15 +179,12 @@ public class UnicastZenPingTests extends ESTestCase {
             assertThat(pingResponses.size(), equalTo(0));
             assertCounters(handleD, handleA, handleB, handleC, handleD);
         } finally {
-            zenPingA.close();
-            zenPingB.close();
-            zenPingC.close();
-            zenPingD.close();
-            handleA.transportService.close();
-            handleB.transportService.close();
-            handleC.transportService.close();
-            handleD.transportService.close();
-            terminate(threadPool);
+            try {
+                IOUtils.close(zenPingA, zenPingB, zenPingC, zenPingD,
+                    handleA.transportService, handleB.transportService, handleC.transportService, handleD.transportService);
+            } finally {
+                terminate(threadPool);
+            }
         }
     }
 
@@ -211,7 +202,7 @@ public class UnicastZenPingTests extends ESTestCase {
         MockTcpTransport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
             new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, version);
         final TransportService transportService = new TransportService(settings, transport, threadPool,
-            TransportService.NOOP_TRANSPORT_INTERCEPTOR);
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
         transportService.start();
         transportService.acceptIncomingRequests();
         ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();
@@ -229,16 +220,16 @@ public class UnicastZenPingTests extends ESTestCase {
         final DiscoveryNode node = new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), emptySet(),
             version);
         transportService.setLocalNode(node);
-        return new NetworkHandle((InetSocketTransportAddress)transport.boundAddress().publishAddress(), transportService, node, counters);
+        return new NetworkHandle((TransportAddress)transport.boundAddress().publishAddress(), transportService, node, counters);
     }
 
     private static class NetworkHandle {
-        public final InetSocketTransportAddress address;
+        public final TransportAddress address;
         public final TransportService transportService;
         public final DiscoveryNode node;
         public final ConcurrentMap<TransportAddress, AtomicInteger> counters;
 
-        public NetworkHandle(InetSocketTransportAddress address, TransportService transportService, DiscoveryNode discoveryNode,
+        public NetworkHandle(TransportAddress address, TransportService transportService, DiscoveryNode discoveryNode,
                              ConcurrentMap<TransportAddress, AtomicInteger> counters) {
             this.address = address;
             this.transportService = transportService;
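In this file the eight individual close() calls collapse into a single org.apache.lucene.util.IOUtils.close(...), which attempts to close every argument even if an earlier one throws, while the surrounding try/finally keeps the thread-pool teardown unconditional. A minimal, self-contained sketch of that pattern (Pool is a hypothetical stand-in for the test's ThreadPool teardown helper, not a real Elasticsearch type):

import org.apache.lucene.util.IOUtils;

import java.io.Closeable;
import java.io.IOException;

class ShutdownSketch {
    interface Pool { void terminate(); } // hypothetical stand-in for the test's thread pool

    // IOUtils.close closes all resources, deferring the first failure until
    // the rest have been attempted; the finally block tears the pool down
    // regardless of whether any close() threw.
    static void shutdown(Pool pool, Closeable... resources) throws IOException {
        try {
            IOUtils.close(resources);
        } finally {
            pool.terminate();
        }
    }
}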
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
index d9a8c9be7f4..6856d05365a 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java
@@ -24,9 +24,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
-import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateListener;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -34,19 +32,14 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.zen.fd.FaultDetection;
-import org.elasticsearch.discovery.zen.membership.MembershipAction;
-import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.TestCustomMetaData;
-import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.BytesTransportRequest;
@@ -55,7 +48,6 @@ import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;
 import org.hamcrest.Matchers;
-import org.junit.Before;
 
 import java.io.IOException;
 import java.net.UnknownHostException;
@@ -75,30 +67,13 @@ import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
-@ESIntegTestCase.SuppressLocalMode
 @TestLogging("_root:DEBUG")
 public class ZenDiscoveryIT extends ESIntegTestCase {
-    private Version previousMajorVersion;
-
-    @Before
-    public void computePrevMajorVersion() {
-        Version previousMajor;
-        // find a GA build whose major version is
         statesFound = new ArrayList<>();
         final CountDownLatch nodesStopped = new CountDownLatch(1);
-        clusterService.add(new ClusterStateListener() {
-            @Override
-            public void clusterChanged(ClusterChangedEvent event) {
-                statesFound.add(event.state());
-                try {
-                    // block until both nodes have stopped to accumulate node failures
-                    nodesStopped.await();
-                } catch (InterruptedException e) {
-                    //meh
-                }
+        clusterService.add(event -> {
+            statesFound.add(event.state());
+            try {
+                // block until both nodes have stopped to accumulate node failures
+                nodesStopped.await();
+            } catch (InterruptedException e) {
+                //meh
             }
         });
 
@@ -184,10 +155,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
     }
 
     public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception {
-        Settings settings = Settings.builder()
-            .put("discovery.type", "zen")
-            .build();
-        List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
+        List<String> nodeNames = internalCluster().startNodesAsync(2).get();
         client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
 
         List<String> nonMasterNodes = new ArrayList<>(nodeNames);
@@ -204,7 +172,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
         assert node != null;
 
         DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes())
-            .add(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(),
+            .add(new DiscoveryNode("abc", buildNewFakeTransportAddress(), emptyMap(),
                 emptySet(), Version.CURRENT)).masterNodeId("abc");
         ClusterState.Builder builder = ClusterState.builder(state);
         builder.nodes(nodes);
@@ -298,10 +266,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
             "    }\n" +
             "}";
 
-        Settings nodeSettings = Settings.builder()
-            .put("discovery.type", "zen") // <-- To override the local setting if set externally
-            .build();
-        internalCluster().startNode(nodeSettings);
+        internalCluster().startNode();
 
         logger.info("--> request node discovery stats");
         NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setDiscovery(true).get();
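The anonymous ClusterStateListener above collapses into a lambda because the listener exposes a single clusterChanged method. A condensed sketch of that shape, using hypothetical stand-in types rather than the real cluster classes:

import java.util.List;
import java.util.concurrent.CountDownLatch;

class ListenerSketch {
    // Stand-ins for ClusterState / ClusterChangedEvent / ClusterStateListener.
    static class State {}
    static class Event { State state() { return new State(); } }
    interface StateListener { void clusterChanged(Event event); } // single abstract method -> lambda friendly

    // Collect each published state, then park until the test releases the latch,
    // mirroring how the integration test accumulates node-failure states.
    static StateListener collector(List<State> statesFound, CountDownLatch nodesStopped) {
        return event -> {
            statesFound.add(event.state());
            try {
                nodesStopped.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
    }
}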
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index a7291dc3736..88cf23fe938 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -31,12 +31,10 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.zen.ping.ZenPing;
-import org.elasticsearch.discovery.zen.ping.ZenPingService;
-import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener;
+import org.elasticsearch.discovery.zen.PublishClusterStateActionTests.AssertingAckListener;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.discovery.MockZenPing;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -58,7 +56,9 @@ import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_M
 import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
 import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
 import static org.elasticsearch.test.ClusterServiceUtils.setState;
+import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.emptyArray;
 import static org.hamcrest.Matchers.equalTo;
 
 public class ZenDiscoveryUnitTests extends ESTestCase {
@@ -67,9 +67,9 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         ClusterName clusterName = new ClusterName("abc");
 
         DiscoveryNodes.Builder currentNodes = DiscoveryNodes.builder();
-        currentNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
+        currentNodes.masterNodeId("a").add(new DiscoveryNode("a", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
         DiscoveryNodes.Builder newNodes = DiscoveryNodes.builder();
-        newNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
+        newNodes.masterNodeId("a").add(new DiscoveryNode("a", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
 
         ClusterState.Builder currentState = ClusterState.builder(clusterName);
         currentState.nodes(currentNodes);
@@ -87,8 +87,8 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         assertFalse("should not ignore, because new state's version is higher to current state's version",
             shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
 
         currentNodes = DiscoveryNodes.builder();
-        currentNodes.masterNodeId("b").add(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
-        ;
+        currentNodes.masterNodeId("b").add(new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
+
         // version isn't taken into account, so randomize it to ensure this.
         if (randomBoolean()) {
             currentState.version(2);
@@ -125,7 +125,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         ArrayList<DiscoveryNode> allNodes = new ArrayList<>();
         for (int i = randomIntBetween(10, 20); i >= 0; i--) {
             Set<Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values())));
-            DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
+            DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, buildNewFakeTransportAddress(), Collections.emptyMap(),
                 roles, Version.CURRENT);
             responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomLong()));
             allNodes.add(node);
@@ -155,7 +155,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         try {
             Set<DiscoveryNode> expectedFDNodes = null;
 
-            final MockTransportService masterTransport = MockTransportService.local(settings, Version.CURRENT, threadPool);
+            final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
             masterTransport.start();
             DiscoveryNode masterNode = new DiscoveryNode("master", masterTransport.boundAddress().publishAddress(), Version.CURRENT);
             toClose.add(masterTransport);
@@ -171,7 +171,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
             toClose.add(masterZen);
             masterTransport.acceptIncomingRequests();
 
-            final MockTransportService otherTransport = MockTransportService.local(settings, Version.CURRENT, threadPool);
+            final MockTransportService otherTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
             otherTransport.start();
             toClose.add(otherTransport);
             DiscoveryNode otherNode = new DiscoveryNode("other", otherTransport.boundAddress().publishAddress(), Version.CURRENT);
@@ -185,7 +185,6 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
             toClose.add(otherZen);
             otherTransport.acceptIncomingRequests();
 
-            masterTransport.connectToNode(otherNode);
             otherTransport.connectToNode(masterNode);
 
@@ -216,12 +215,62 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         }
     }
 
+    public void testPendingCSQueueIsClearedWhenClusterStatePublished() throws Exception {
+        ThreadPool threadPool = new TestThreadPool(getClass().getName());
+        // randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
+        int minMasterNodes = randomBoolean() ? 3 : 1;
+        Settings settings = Settings.builder()
+            .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build();
+
+        ArrayList<Closeable> toClose = new ArrayList<>();
+        try {
+            final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
+            masterTransport.start();
+            DiscoveryNode masterNode = new DiscoveryNode("master", masterTransport.boundAddress().publishAddress(), Version.CURRENT);
+            toClose.add(masterTransport);
+            masterTransport.setLocalNode(masterNode);
+            ClusterState state = ClusterStateCreationUtils.state(masterNode, null, masterNode);
+            // build the zen discovery and cluster service
+            ClusterService masterClusterService = createClusterService(threadPool, masterNode);
+            toClose.add(masterClusterService);
+            state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build();
+            setState(masterClusterService, state);
+            ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool);
+            toClose.add(masterZen);
+            masterTransport.acceptIncomingRequests();
+
+            // inject a pending cluster state
+            masterZen.pendingClusterStatesQueue().addPending(ClusterState.builder(new ClusterName("foreign")).build());
+
+            // a new cluster state with a new discovery node (we will test if the cluster state
+            // was updated by the presence of this node in NodesFaultDetection)
+            ClusterState newState = ClusterState.builder(masterClusterService.state()).incrementVersion().nodes(
+                DiscoveryNodes.builder(masterClusterService.state().nodes()).masterNodeId(masterNode.getId())
+            ).build();
+
+
+            try {
+                // publishing a new cluster state
+                ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
+                AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
+                masterZen.publish(clusterChangedEvent, listener);
+                listener.await(1, TimeUnit.HOURS);
+                // publish was a success, check that the queue was cleared
+                assertThat(masterZen.pendingClusterStates(), emptyArray());
+            } catch (Discovery.FailedToCommitClusterStateException e) {
+                // not successful, so the pending queue should stay
+                assertThat(masterZen.pendingClusterStates(), arrayWithSize(1));
+                assertThat(masterZen.pendingClusterStates()[0].getClusterName().value(), equalTo("foreign"));
+            }
+        } finally {
+            IOUtils.close(toClose);
+            terminate(threadPool);
+        }
+    }
+
     private ZenDiscovery buildZenDiscovery(Settings settings, TransportService service, ClusterService clusterService, ThreadPool threadPool) {
         ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-        ZenPingService zenPingService = new ZenPingService(settings, Collections.emptySet());
-        ElectMasterService electMasterService = new ElectMasterService(settings);
-        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService,
-            clusterSettings, zenPingService, electMasterService);
+        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService, clusterSettings, new MockZenPing(settings));
         zenDiscovery.start();
         return zenDiscovery;
     }
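The new testPendingCSQueueIsClearedWhenClusterStatePublished randomizes minimum_master_nodes so that publishing either commits or fails, then asserts the opposite queue state on each path. Condensed, the control flow looks like the sketch below (Discovery and the exception type are illustrative stand-ins, not the real ZenDiscovery API):

class PublishOutcomeSketch {
    static class FailedToCommitException extends RuntimeException {}

    interface Discovery {
        void publish(Object changeEvent) throws FailedToCommitException;
        Object[] pendingStates();
    }

    // One test body covers both outcomes: a successful publish must clear the
    // pending-states queue, while a failed commit must leave the injected
    // pending state in place.
    static void assertQueueBehaviour(Discovery zen, Object event) {
        try {
            zen.publish(event);
            assert zen.pendingStates().length == 0 : "successful publish should clear the queue";
        } catch (FailedToCommitException e) {
            assert zen.pendingStates().length == 1 : "failed publish should keep the pending state";
        }
    }
}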
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
index 2275756e8ee..9c70587d0e5 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
@@ -23,8 +23,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.discovery.zen.ping.ZenPing;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.ArrayList;
@@ -43,7 +41,7 @@ public class ZenPingTests extends ESTestCase {
         long clusterStateVersionPerNode[] = new long[nodes.length];
         ArrayList<ZenPing.PingResponse> pings = new ArrayList<>();
         for (int i = 0; i < nodes.length; i++) {
-            nodes[i] = new DiscoveryNode("" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
+            nodes[i] = new DiscoveryNode("" + i, buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         }
 
         for (int pingCount = scaledRandomIntBetween(10, nodes.length * 10); pingCount > 0; pingCount--) {
diff --git a/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
index d9371df09ae..8d9cacd5f0e 100644
--- a/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
+++ b/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
@@ -23,9 +23,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
 
 import static org.elasticsearch.client.Requests.createIndexRequest;
 
-/**
- *
- */
 public class AliasedIndexDocumentActionsIT extends DocumentActionsIT {
 
     @Override
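Throughout these tests, LocalTransportAddress gives way to the buildNewFakeTransportAddress() helper from ESTestCase. The sketch below shows one plausible way such a helper can hand out unique fake endpoints without binding a socket; it is an assumption-level illustration, not the actual implementation:

import java.net.InetAddress;
import java.util.concurrent.atomic.AtomicInteger;

class FakeAddressSketch {
    private static final AtomicInteger PORT = new AtomicInteger(1);

    // Hand out a unique loopback-based endpoint per call so two test nodes
    // never collide, while no real network resources are ever allocated.
    static String buildNewFakeTransportAddress() {
        return InetAddress.getLoopbackAddress().getHostAddress() + ":" + PORT.getAndIncrement();
    }
}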
diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
index 91c2655b3c2..e3556c8cc7c 100644
--- a/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
+++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsIT.java
@@ -37,6 +37,7 @@ import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
 
 import java.io.IOException;
 
+import static org.elasticsearch.action.DocWriteRequest.OpType;
 import static org.elasticsearch.client.Requests.clearIndicesCacheRequest;
 import static org.elasticsearch.client.Requests.getRequest;
 import static org.elasticsearch.client.Requests.indexRequest;
@@ -191,31 +192,31 @@ public class DocumentActionsIT extends ESIntegTestCase {
         assertThat(bulkResponse.getItems().length, equalTo(5));
 
         assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
-        assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index"));
+        assertThat(bulkResponse.getItems()[0].getOpType(), equalTo(OpType.INDEX));
         assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName()));
         assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1"));
         assertThat(bulkResponse.getItems()[0].getId(), equalTo("1"));
 
         assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
-        assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create"));
+        assertThat(bulkResponse.getItems()[1].getOpType(), equalTo(OpType.CREATE));
         assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName()));
         assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1"));
         assertThat(bulkResponse.getItems()[1].getId(), equalTo("2"));
 
         assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false));
-        assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("index"));
+        assertThat(bulkResponse.getItems()[2].getOpType(), equalTo(OpType.INDEX));
         assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName()));
         assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1"));
         String generatedId3 = bulkResponse.getItems()[2].getId();
 
         assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false));
-        assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete"));
+        assertThat(bulkResponse.getItems()[3].getOpType(), equalTo(OpType.DELETE));
         assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName()));
         assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1"));
         assertThat(bulkResponse.getItems()[3].getId(), equalTo("1"));
 
         assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true));
-        assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("index"));
+        assertThat(bulkResponse.getItems()[4].getOpType(), equalTo(OpType.INDEX));
         assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName()));
         assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1"));
diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java
index 0ba97bee899..814a861139e 100644
--- a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java
+++ b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java
@@ -39,8 +39,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class ShardInfoIT extends ESIntegTestCase {
     private int numCopies;
     private int numNodes;
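DocumentActionsIT now compares getOpType() against the DocWriteRequest.OpType enum rather than bare strings, so a misspelled operation name fails at compile time instead of silently failing an equality assertion. A small sketch of why the enum form is safer (values mirrored here for illustration; the real enum lives in org.elasticsearch.action.DocWriteRequest):

class OpTypeSketch {
    enum OpType { INDEX, CREATE, UPDATE, DELETE } // illustrative mirror of the real enum

    // Switching over the enum forces every operation kind to be handled;
    // a stray string like "indx" could never reach this method.
    static boolean isWrite(OpType type) {
        switch (type) {
            case INDEX:
            case CREATE:
            case UPDATE:
                return true;
            case DELETE:
                return false;
        }
        throw new AssertionError("unreachable");
    }
}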
diff --git a/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java b/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java
index 528b03bc831..30ae9a7b6f7 100644
--- a/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java
+++ b/core/src/test/java/org/elasticsearch/explain/ExplainActionIT.java
@@ -46,8 +46,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
 
-/**
- */
 public class ExplainActionIT extends ESIntegTestCase {
     public void testSimple() throws Exception {
         assertAcked(prepareCreate("test")
diff --git a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
index 8cd1b479416..a9134c9ba4e 100644
--- a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
+++ b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
@@ -48,8 +48,6 @@ import static org.elasticsearch.action.fieldstats.IndexConstraint.Property.MIN;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 public class FieldStatsTests extends ESSingleNodeTestCase {
     public void testByte() {
         testNumberRange("field1", "byte", 12, 18);
diff --git a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java
index 043eaa2708f..13e39a342dc 100644
--- a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java
@@ -24,7 +24,6 @@ import org.elasticsearch.action.support.nodes.BaseNodeResponse;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -42,14 +41,12 @@ import static java.util.Collections.emptySet;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.sameInstance;
 
-/**
- */
 public class AsyncShardFetchTests extends ESTestCase {
-    private final DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
+    private final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(),
         Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
     private final Response response1 = new Response(node1);
     private final Throwable failure1 = new Throwable("simulated failure 1");
-    private final DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
+    private final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), Collections.emptyMap(),
         Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
     private final Response response2 = new Response(node2);
     private final Throwable failure2 = new Throwable("simulate failure 2");
diff --git a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
index 74df2f33fc0..581ef0f99a3 100644
--- a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
@@ -35,8 +35,6 @@ import java.util.Map;
 
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 public class DanglingIndicesStateTests extends ESTestCase {
 
     private static Settings indexSettings = Settings.builder()
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java
index d2085ab9147..2bec3d5eded 100644
--- a/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java
@@ -36,7 +36,7 @@ public class GatewayServiceTests extends ESTestCase {
             new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null);
 
         return new GatewayService(settings.build(),
-            null, clusterService, null, null, null, new NoopDiscovery(), null, null);
+            null, clusterService, null, null, null, new NoopDiscovery(), null);
     }
 
     public void testDefaultRecoverAfterTime() throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
index 82c38748a48..62f040c0163 100644
--- a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
@@ -30,8 +30,6 @@ import org.elasticsearch.test.ESTestCase;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class MetaStateServiceTests extends ESTestCase {
     private static Settings indexSettings = Settings.builder()
         .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
index 0fd89ec8898..2af4d49f742 100644
--- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
@@ -65,8 +65,6 @@ import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
-/**
- */
 public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
 
     private final ShardId shardId = new ShardId("test", "_na_", 0);
diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java
index c820bccae51..226b1422b4d 100644
--- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java
+++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java
@@ -37,9 +37,6 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
 
-/**
- *
- */
 @ClusterScope(numDataNodes =0, scope= Scope.TEST)
 public class QuorumGatewayIT extends ESIntegTestCase {
     @Override
diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
index 188f10c588e..22f7fe2498f 100644
--- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
@@ -63,8 +63,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import static java.util.Collections.unmodifiableMap;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
 
     private final ShardId shardId = new ShardId("test", "_na_", 0);
     private final DiscoveryNode node1 = newNode("node1");
diff --git a/core/src/test/java/org/elasticsearch/http/HttpServerTests.java b/core/src/test/java/org/elasticsearch/http/HttpServerTests.java
index 87167cdb733..eb22e5bac47 100644
--- a/core/src/test/java/org/elasticsearch/http/HttpServerTests.java
+++ b/core/src/test/java/org/elasticsearch/http/HttpServerTests.java
@@ -28,7 +28,6 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
-import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -142,7 +141,7 @@ public class HttpServerTests extends ESTestCase {
 
         @Override
         public BoundTransportAddress boundAddress() {
-            LocalTransportAddress transportAddress = new LocalTransportAddress("1");
+            TransportAddress transportAddress = buildNewFakeTransportAddress();
             return new BoundTransportAddress(new TransportAddress[] {transportAddress} ,transportAddress);
         }
diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java
index 1504f624f91..92e1cbbcbaa 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java
@@ -30,12 +30,11 @@ import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.similarities.BM25Similarity;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.SetOnce.AlreadySetException;
 import org.elasticsearch.Version;
-import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -68,7 +67,6 @@ import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.script.ScriptContextRegistry;
 import org.elasticsearch.script.ScriptEngineRegistry;
-import org.elasticsearch.script.ScriptEngineService;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.ScriptSettings;
 import org.elasticsearch.search.internal.SearchContext;
@@ -82,12 +80,11 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Consumer;
 
+import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
 
 public class IndexModuleTests extends ESTestCase {
@@ -96,7 +93,6 @@ public class IndexModuleTests extends ESTestCase {
     private IndexSettings indexSettings;
     private Environment environment;
     private NodeEnvironment nodeEnvironment;
-    private NodeServicesProvider nodeServicesProvider;
     private IndicesQueryCache indicesQueryCache;
 
     private IndexService.ShardStoreDeleter deleter = new IndexService.ShardStoreDeleter() {
@@ -110,30 +106,32 @@ public class IndexModuleTests extends ESTestCase {
     private final IndexFieldDataCache.Listener listener = new IndexFieldDataCache.Listener() {};
     private MapperRegistry mapperRegistry;
-
-    static NodeServicesProvider newNodeServiceProvider(Settings settings, Environment environment, Client client, ScriptEngineService... scriptEngineServices) throws IOException {
-        // TODO this can be used in other place too - lets first refactor the IndicesQueriesRegistry
-        ThreadPool threadPool = new TestThreadPool("test");
-        CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
-        BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
-        ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Arrays.asList(scriptEngineServices));
-        ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
-        ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
-        ScriptService scriptService = new ScriptService(settings, environment, new ResourceWatcherService(settings, threadPool), scriptEngineRegistry, scriptContextRegistry, scriptSettings);
-        IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry();
-        ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
-        return new NodeServicesProvider(threadPool, bigArrays, client, scriptService, indicesQueriesRegistry, circuitBreakerService, clusterService);
-    }
+    private ThreadPool threadPool;
+    private CircuitBreakerService circuitBreakerService;
+    private BigArrays bigArrays;
+    private ScriptService scriptService;
+    private IndicesQueriesRegistry indicesQueriesRegistry;
+    private ClusterService clusterService;
 
     @Override
     public void setUp() throws Exception {
         super.setUp();
-        settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
+        settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
         indicesQueryCache = new IndicesQueryCache(settings);
         indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
         index = indexSettings.getIndex();
         environment = new Environment(settings);
-        nodeServicesProvider = newNodeServiceProvider(settings, environment, null);
+        threadPool = new TestThreadPool("test");
+        circuitBreakerService = new NoneCircuitBreakerService();
+        bigArrays = new BigArrays(settings, circuitBreakerService);
+        ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(emptyList());
+        ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
+        ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
+        scriptService = new ScriptService(settings, environment, new ResourceWatcherService(settings, threadPool), scriptEngineRegistry,
+            scriptContextRegistry, scriptSettings);
+        indicesQueriesRegistry = new IndicesQueriesRegistry();
+        clusterService = ClusterServiceUtils.createClusterService(threadPool);
         nodeEnvironment = new NodeEnvironment(settings, environment);
         mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
     }
@@ -141,10 +139,14 @@ public class IndexModuleTests extends ESTestCase {
     @Override
     public void tearDown() throws Exception {
         super.tearDown();
-        nodeEnvironment.close();
-        indicesQueryCache.close();
-        nodeServicesProvider.getClusterService().close();
-        ThreadPool.terminate(nodeServicesProvider.getThreadPool(), 10, TimeUnit.SECONDS);
+        IOUtils.close(nodeEnvironment, indicesQueryCache, clusterService);
+        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
+    }
+
+    private IndexService newIndexService(IndexModule module) throws IOException {
+        return module.newIndexService(nodeEnvironment, deleter, circuitBreakerService, bigArrays, threadPool, scriptService,
+            indicesQueriesRegistry, clusterService, null, indicesQueryCache, mapperRegistry, shardId -> {},
+            new IndicesFieldDataCache(settings, listener));
     }
 
     public void testWrapperIsBound() throws IOException {
@@ -152,8 +154,8 @@ public class IndexModuleTests extends ESTestCase {
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
         module.setSearcherWrapper((s) -> new Wrapper());
         module.engineFactory.set(new MockEngineFactory(AssertingDirectoryReader.class));
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache,
-            mapperRegistry, shardId -> {} ,new IndicesFieldDataCache(settings, listener));
+
+        IndexService indexService = newIndexService(module);
         assertTrue(indexService.getSearcherWrapper() instanceof Wrapper);
         assertSame(indexService.getEngineFactory(), module.engineFactory.get());
         indexService.close("simon says", false);
@@ -177,8 +179,8 @@ public class IndexModuleTests extends ESTestCase {
         } catch (IllegalArgumentException ex) {
             // fine
         }
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache,
-            mapperRegistry, shardId -> {}, new IndicesFieldDataCache(settings, listener));
+
+        IndexService indexService = newIndexService(module);
         assertTrue(indexService.getIndexStore() instanceof FooStore);
 
         indexService.close("simon says", false);
@@ -196,8 +198,7 @@ public class IndexModuleTests extends ESTestCase {
         IndexModule module = new IndexModule(indexSettings, null,
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
         module.addIndexEventListener(eventListener);
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, this.listener));
+        IndexService indexService = newIndexService(module);
         IndexSettings x = indexService.getIndexSettings();
         assertEquals(x.getSettings().getAsMap(), indexSettings.getSettings().getAsMap());
         assertEquals(x.getIndex(), index);
@@ -222,8 +223,7 @@ public class IndexModuleTests extends ESTestCase {
 
         }
 
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, listener));
+        IndexService indexService = newIndexService(module);
         assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey()));
 
         indexService.close("simon says", false);
@@ -245,9 +245,7 @@ public class IndexModuleTests extends ESTestCase {
         expectThrows(IllegalArgumentException.class, () -> module.addIndexOperationListener(listener));
         expectThrows(IllegalArgumentException.class, () -> module.addIndexOperationListener(null));
 
-
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, this.listener));
+        IndexService indexService = newIndexService(module);
         assertEquals(2, indexService.getIndexOperationListeners().size());
         assertEquals(IndexingSlowLog.class, indexService.getIndexOperationListeners().get(0).getClass());
         assertSame(listener, indexService.getIndexOperationListeners().get(1));
@@ -276,9 +274,7 @@ public class IndexModuleTests extends ESTestCase {
         expectThrows(IllegalArgumentException.class, () -> module.addSearchOperationListener(listener));
         expectThrows(IllegalArgumentException.class, () -> module.addSearchOperationListener(null));
 
-
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, this.listener));
+        IndexService indexService = newIndexService(module);
         assertEquals(2, indexService.getSearchOperationListener().size());
         assertEquals(SearchSlowLog.class, indexService.getSearchOperationListener().get(0).getClass());
         assertSame(listener, indexService.getSearchOperationListener().get(1));
@@ -311,8 +307,7 @@ public class IndexModuleTests extends ESTestCase {
             }
         });
 
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, listener));
+        IndexService indexService = newIndexService(module);
         SimilarityService similarityService = indexService.similarityService();
         assertNotNull(similarityService.getSimilarity("my_similarity"));
         assertTrue(similarityService.getSimilarity("my_similarity").get() instanceof TestSimilarity);
@@ -343,12 +338,8 @@ public class IndexModuleTests extends ESTestCase {
             .build();
         IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null,
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
-        try {
-            module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-                shardId -> {}, new IndicesFieldDataCache(settings, listener));
-        } catch (IllegalArgumentException ex) {
-            assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage());
-        }
+        Exception ex = expectThrows(IllegalArgumentException.class, () -> newIndexService(module));
+        assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage());
     }
 
     public void testSetupWithoutType() throws IOException {
@@ -359,12 +350,8 @@ public class IndexModuleTests extends ESTestCase {
             .build();
         IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null,
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
-        try {
-            module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-                shardId -> {}, new IndicesFieldDataCache(settings, listener));
-        } catch (IllegalArgumentException ex) {
-            assertEquals("Similarity [my_similarity] must have an associated type", ex.getMessage());
-        }
+        Exception ex = expectThrows(IllegalArgumentException.class, () -> newIndexService(module));
+        assertEquals("Similarity [my_similarity] must have an associated type", ex.getMessage());
    }
 
     public void testForceCustomQueryCache() throws IOException {
@@ -375,8 +362,7 @@ public class IndexModuleTests extends ESTestCase {
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
         module.forceQueryCacheProvider((a, b) -> new CustomQueryCache());
         expectThrows(AlreadySetException.class, () -> module.forceQueryCacheProvider((a, b) -> new CustomQueryCache()));
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, listener));
+        IndexService indexService = newIndexService(module);
         assertTrue(indexService.cache().query() instanceof CustomQueryCache);
         indexService.close("simon says", false);
     }
@@ -387,8 +373,7 @@ public class IndexModuleTests extends ESTestCase {
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
         IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null,
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, listener));
+        IndexService indexService = newIndexService(module);
         assertTrue(indexService.cache().query() instanceof IndexQueryCache);
         indexService.close("simon says", false);
     }
@@ -401,8 +386,7 @@ public class IndexModuleTests extends ESTestCase {
         IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null,
             new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
         module.forceQueryCacheProvider((a, b) -> new CustomQueryCache());
-        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
-            shardId -> {}, new IndicesFieldDataCache(settings, listener));
+        IndexService indexService = newIndexService(module);
         assertTrue(indexService.cache().query() instanceof DisabledQueryCache);
         indexService.close("simon says", false);
     }
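IndexModuleTests centralizes construction in a newIndexService(module) helper and swaps its try/catch-with-assert blocks for expectThrows, which fails the test automatically when nothing is thrown and hands back the exception for message assertions; the old try/catch form would pass silently if no exception occurred. The helper the refactor leans on behaves roughly like this self-contained re-implementation (the real one lives in LuceneTestCase and is inherited via ESTestCase):

class ExpectThrowsSketch {
    interface ThrowingRunnable { void run() throws Throwable; }

    // Fail if nothing is thrown, fail on the wrong type, otherwise return the
    // exception so the caller can assert on its message.
    static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expectedType.isInstance(t)) {
                return expectedType.cast(t);
            }
            throw new AssertionError("unexpected exception type: " + t.getClass(), t);
        }
        throw new AssertionError("expected " + expectedType.getSimpleName() + " but nothing was thrown");
    }
}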
diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
index e381bf54520..a601f238d24 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
@@ -77,82 +77,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
         return new CompressedXContent(builder.string());
     }
 
-    public void testFilteringAliases() throws Exception {
-        IndexService indexService = createIndex("test", Settings.EMPTY);
-        add(indexService, "cats", filter(termQuery("animal", "cat")));
-        add(indexService, "dogs", filter(termQuery("animal", "dog")));
-        add(indexService, "all", null);
-
-        assertThat(indexService.getMetaData().getAliases().containsKey("cats"), equalTo(true));
-        assertThat(indexService.getMetaData().getAliases().containsKey("dogs"), equalTo(true));
-        assertThat(indexService.getMetaData().getAliases().containsKey("turtles"), equalTo(false));
-
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats").toString(), equalTo("animal:cat"));
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "dogs").toString(), equalTo("animal:cat animal:dog"));
-
-        // Non-filtering alias should turn off all filters because filters are ORed
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all"), nullValue());
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "cats", "all"), nullValue());
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "all", "cats"), nullValue());
-
-        add(indexService, "cats", filter(termQuery("animal", "feline")));
-        add(indexService, "dogs", filter(termQuery("animal", "canine")));
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline"));
-    }
-
-    public void testAliasFilters() throws Exception {
-        IndexService indexService = createIndex("test", Settings.EMPTY);
-
-        add(indexService, "cats", filter(termQuery("animal", "cat")));
-        add(indexService, "dogs", filter(termQuery("animal", "dog")));
-
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext()), nullValue());
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs").toString(), equalTo("animal:dog"));
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:dog animal:cat"));
-
-        add(indexService, "cats", filter(termQuery("animal", "feline")));
-        add(indexService, "dogs", filter(termQuery("animal", "canine")));
-
-        assertThat(indexService.aliasFilter(indexService.newQueryShardContext(), "dogs", "cats").toString(), equalTo("animal:canine animal:feline"));
-    }
-
-    public void testRemovedAliasFilter() throws Exception {
-        IndexService indexService = createIndex("test", Settings.EMPTY);
-
-        add(indexService, "cats", filter(termQuery("animal", "cat")));
-        remove(indexService, "cats");
-        try {
-            indexService.aliasFilter(indexService.newQueryShardContext(), "cats");
-            fail("Expected InvalidAliasNameException");
-        } catch (InvalidAliasNameException e) {
-            assertThat(e.getMessage(), containsString("Invalid alias name [cats]"));
-        }
-    }
-
-    public void testUnknownAliasFilter() throws Exception {
-        IndexService indexService = createIndex("test", Settings.EMPTY);
-
-        add(indexService, "cats", filter(termQuery("animal", "cat")));
-        add(indexService, "dogs", filter(termQuery("animal", "dog")));
-
-        try {
-            indexService.aliasFilter(indexService.newQueryShardContext(), "unknown");
-            fail();
-        } catch (InvalidAliasNameException e) {
-            // all is well
-        }
-    }
-
-    private void remove(IndexService service, String alias) {
-        IndexMetaData build = IndexMetaData.builder(service.getMetaData()).removeAlias(alias).build();
-        service.updateMetaData(build);
-    }
-
-    private void add(IndexService service, String alias, @Nullable CompressedXContent filter) {
-        IndexMetaData build = IndexMetaData.builder(service.getMetaData()).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build();
-        service.updateMetaData(build);
-    }
-
     public void testBaseAsyncTask() throws InterruptedException, IOException {
         IndexService indexService = createIndex("test", Settings.EMPTY);
         AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java
index 3909354c989..97a6c6abf70 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java
@@ -16,11 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.index;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -29,18 +29,20 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
 
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
 
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.core.StringContains.containsString;
+import static org.hamcrest.object.HasToString.hasToString;
+
 public class IndexSettingsTests extends ESTestCase {
 
     public void testRunListener() {
@@ -348,26 +350,48 @@ public class IndexSettingsTests extends ESTestCase {
         assertEquals(actualNewTranslogFlushThresholdSize, settings.getFlushThresholdSize());
     }
-
     public void testArchiveBrokenIndexSettings() {
-        Settings settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrBrokenSettings(Settings.EMPTY);
+        Settings settings =
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(
+                Settings.EMPTY,
+                e -> { assert false : "should not have been invoked, no unknown settings"; },
+                (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; });
         assertSame(settings, Settings.EMPTY);
-        settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrBrokenSettings(Settings.builder()
-            .put("index.refresh_interval", "-200").build());
+        settings =
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(
+                Settings.builder().put("index.refresh_interval", "-200").build(),
+                e -> { assert false : "should not have been invoked, no invalid settings"; },
+                (e, ex) -> {
+                    assertThat(e.getKey(), equalTo("index.refresh_interval"));
+                    assertThat(e.getValue(), equalTo("-200"));
+                    assertThat(ex, hasToString(containsString("failed to parse setting [index.refresh_interval] with value [-200]")));
+                });
         assertEquals("-200", settings.get("archived.index.refresh_interval"));
         assertNull(settings.get("index.refresh_interval"));
 
         Settings prevSettings = settings; // no double archive
-        settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrBrokenSettings(prevSettings);
+        settings =
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(
+                prevSettings,
+                e -> { assert false : "should not have been invoked, no unknown settings"; },
+                (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; });
         assertSame(prevSettings, settings);
 
-        settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrBrokenSettings(Settings.builder()
-            .put("index.version.created", Version.CURRENT.id) // private setting
-            .put("index.unknown", "foo")
-            .put("index.refresh_interval", "2s").build());
+        settings =
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(
+                Settings.builder()
+                    .put("index.version.created", Version.CURRENT.id) // private setting
+                    .put("index.unknown", "foo")
+                    .put("index.refresh_interval", "2s").build(),
+                e -> {
+                    assertThat(e.getKey(), equalTo("index.unknown"));
+                    assertThat(e.getValue(), equalTo("foo"));
+                },
+                (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; });
 
         assertEquals("foo", settings.get("archived.index.unknown"));
         assertEquals(Integer.toString(Version.CURRENT.id), settings.get("index.version.created"));
         assertEquals("2s", settings.get("index.refresh_interval"));
     }
+
 }
diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
index 143fdc9fc28..a335a42edb6 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
@@ -291,13 +291,14 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
 
         // Node1 has the primary, now node2 has the replica
-        String node2 = internalCluster().startNode(nodeSettings);
+        internalCluster().startNode(nodeSettings);
         ensureGreen(IDX);
         client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
         flushAndRefresh(IDX);
 
         logger.info("--> stopping node1 [{}]", node1);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
+        ensureClusterSizeConsistency(); // wait for the new node to be elected and process the node leave
         ensureYellow(IDX);
 
         logger.info("--> performing query");
@@ -599,7 +600,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
     // deleting the index and hence, deleting all the shard data for the index, the test
     // failure still showed some Lucene files in the data directory for that index. Not sure
     // why that is, so turning on more logging here.
-    @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE")
+    @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE,_root:DEBUG")
     public void testShadowReplicaNaturalRelocation() throws Exception {
         Path dataPath = createTempDir();
         Settings nodeSettings = nodeSettings(dataPath);
diff --git a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java
index 062f8c01f42..76d3bfbc484 100644
--- a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java
+++ b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.ScriptService;
@@ -46,8 +47,7 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
     protected SearchContext createSearchContext(IndexService indexService) {
         BigArrays bigArrays = indexService.getBigArrays();
         ThreadPool threadPool = indexService.getThreadPool();
-        ScriptService scriptService = node().injector().getInstance(ScriptService.class);
-        return new TestSearchContext(threadPool, bigArrays, scriptService, indexService) {
+        return new TestSearchContext(threadPool, bigArrays, indexService) {
             @Override
             public ShardSearchRequest request() {
                 return new ShardSearchRequest() {
@@ -82,8 +82,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
                     }
 
                     @Override
-                    public String[] filteringAliases() {
-                        return new String[0];
+                    public QueryBuilder filteringAliases() {
+                        return null;
                     }
 
                     @Override
diff --git a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java
deleted file mode 100644
index 1ead4554a11..00000000000
--- a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index;
-
-import org.elasticsearch.action.index.IndexAction;
-import org.elasticsearch.action.index.IndexResponse;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
-import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.fd.FaultDetection;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.transport.MockTransportService;
-import org.elasticsearch.transport.TransportService;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import static java.util.Collections.singleton;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
-import static org.hamcrest.Matchers.equalTo;
-
-/**
- * Test failure when index replication actions fail mid-flight
- */
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
-@ESIntegTestCase.SuppressLocalMode
-public class TransportIndexFailuresIT extends ESIntegTestCase {
-
-    private static final Settings nodeSettings = Settings.builder()
-        .put("discovery.type", "zen") // <-- To override the local setting if set externally
-        .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
-        .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // <-- for hitting simulated network failures quickly
-        .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
-        .put("discovery.zen.minimum_master_nodes", 1)
-        .build();
-
-    @Override
-    protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(MockTransportService.TestPlugin.class);
-    }
-
-    @Override
-    protected int numberOfShards() {
-        return 1;
-    }
-
-    @Override
-    protected int numberOfReplicas() {
-        return 1;
-    }
-
-    public void testNetworkPartitionDuringReplicaIndexOp() throws Exception {
-        final String INDEX = "testidx";
-
-        List<String> nodes = internalCluster().startNodesAsync(2, nodeSettings).get();
-
-        // Create index test with 1 shard, 1 replica and ensure it is green
-        createIndex(INDEX);
-        ensureGreen(INDEX);
-
-        // Disable allocation so the replica cannot be reallocated when it fails
-        Settings s = Settings.builder().put("cluster.routing.allocation.enable", "none").build();
-        client().admin().cluster().prepareUpdateSettings().setTransientSettings(s).get();
-
-        // Determine which node holds the primary shard
-        ClusterState state = getNodeClusterState(nodes.get(0));
-        IndexShardRoutingTable shard = state.getRoutingTable().index(INDEX).shard(0);
-        String primaryNode;
-        String replicaNode;
-        if (shard.getShards().get(0).primary()) {
-            primaryNode = nodes.get(0);
-            replicaNode = nodes.get(1);
-        } else {
-            primaryNode = nodes.get(1);
-            replicaNode = nodes.get(0);
-        }
-        logger.info("--> primary shard is on {}", primaryNode);
-
-        // Index a document to make sure everything works well
-        IndexResponse resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "bar").get();
-        assertThat("document exists on primary node",
-            internalCluster().client(primaryNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
-            equalTo(true));
-        assertThat("document exists on replica node",
-            internalCluster().client(replicaNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
-            equalTo(true));
-
-        // Disrupt the network so indexing requests fail to replicate
-        logger.info("--> preventing index/replica operations");
-        TransportService mockTransportService = internalCluster().getInstance(TransportService.class, primaryNode);
-        ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
-            internalCluster().getInstance(TransportService.class, replicaNode),
-            singleton(IndexAction.NAME + "[r]")
-        );
-        mockTransportService = internalCluster().getInstance(TransportService.class, replicaNode);
-        ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
-            internalCluster().getInstance(TransportService.class, primaryNode),
-            singleton(IndexAction.NAME + "[r]")
-        );
-
-        logger.info("--> indexing into primary");
-        // the replica shard should now be marked as failed because the replication operation will fail
-        resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "baz").get();
-        // wait until the cluster reaches an exact yellow state, meaning replica has failed
-        assertBusy(new Runnable() {
-            @Override
-            public void run() {
-                assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-            }
-        });
-        assertThat("document should still be indexed and available",
-            client().prepareGet(INDEX, "doc", resp.getId()).get().isExists(), equalTo(true));
-
-        state = getNodeClusterState(randomFrom(nodes.toArray(Strings.EMPTY_ARRAY)));
-        RoutingNodes rn = state.getRoutingNodes();
-        logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
-            rn.shards(input -> true).size(),
-            rn.shardsWithState(UNASSIGNED).size(),
-            rn.shardsWithState(INITIALIZING).size(),
-            rn.shardsWithState(RELOCATING).size(),
-            rn.shardsWithState(STARTED).size());
-        logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
-            rn.shardsWithState(UNASSIGNED),
-            rn.shardsWithState(INITIALIZING),
-            rn.shardsWithState(RELOCATING),
-            rn.shardsWithState(STARTED));
-
-        assertThat("only a single shard is now active (replica should be failed and not reallocated)",
-            rn.shardsWithState(STARTED).size(), equalTo(1));
-    }
-
-    private ClusterState getNodeClusterState(String node) {
-        return internalCluster().client(node).admin().cluster().prepareState().setLocal(true).get().getState();
-    }
-}
+109,7 @@ public class WaitUntilRefreshIT extends ESIntegTestCase { assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get(), "2"); // Update-becomes-delete with RefreshPolicy.WAIT_UNTIL - update = client().prepareUpdate("test", "test", "2").setScript(new Script("delete_plz", ScriptType.INLINE, "native", emptyMap())) + update = client().prepareUpdate("test", "test", "2").setScript(new Script(ScriptType.INLINE, "native", "delete_plz", emptyMap())) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(2, update.getVersion()); assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java index 206dffd0fb7..f993cc1490c 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.test.IndexSettingsModule; import static org.elasticsearch.test.ESTestCase.createTestAnalysis; -/** - */ public class CharFilterTests extends ESTokenStreamTestCase { public void testMappingCharFilter() throws Exception { Settings settings = Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java index ede42404558..e8734331167 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java @@ -49,8 +49,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.instanceOf; -/** - */ public class CompoundAnalysisTests extends ESTestCase { public void testDefaultsCompoundAnalysis() throws Exception { Settings settings = getJsonSettings(); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index b1dcdec646b..7dc55b43700 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -39,9 +39,6 @@ import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -/** - * - */ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java index 7c4818c63b3..c4632e57490 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java @@ -37,9 +37,6 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntB import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.hamcrest.Matchers.instanceOf; -/** - * - */ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { public void testEnglishFilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); diff --git 
a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java index ed6866e6a81..19c6bf64dcc 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java @@ -36,6 +36,7 @@ import java.io.InputStream; import java.io.StringReader; import java.nio.file.Files; import java.nio.file.Path; + public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase { public void testDefault() throws IOException { Settings settings = Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java index c6dfdc1a413..c4842e497ef 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java @@ -42,8 +42,6 @@ import java.nio.file.Path; import static org.hamcrest.Matchers.equalTo; -/** - */ public class SynonymsAnalysisTests extends ESTestCase { protected final Logger logger = Loggers.getLogger(getClass()); private IndexAnalyzers indexAnalyzers; diff --git a/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java b/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java index 362ee9c5332..767b0511ddf 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java +++ b/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java @@ -27,9 +27,6 @@ import org.hamcrest.TypeSafeMatcher; import java.io.IOException; -/** - * - */ public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher<Engine.Searcher> { private final Query query; diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index ebf4e6ad217..dbdae7a8b41 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -31,10 +31,13 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -80,6 +83,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.codec.CodecService; @@ -114,6 +118,7 @@ import
org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.OldIndexUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; @@ -142,10 +147,13 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; @@ -294,12 +302,21 @@ public class InternalEngineTests extends ESTestCase { } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { - return createEngine(defaultSettings, store, translogPath, newMergePolicy()); + return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null); } protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) throws IOException { + return createEngine(indexSettings, store, translogPath, mergePolicy, null); + + } + protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, Supplier<IndexWriter> indexWriterSupplier) throws IOException { EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); - InternalEngine internalEngine = new InternalEngine(config); + InternalEngine internalEngine = new InternalEngine(config) { + @Override + IndexWriter createWriter(boolean create) throws IOException { + return (indexWriterSupplier != null) ?
indexWriterSupplier.get() : super.createWriter(create); + } + }; if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { internalEngine.recoverFromTranslog(); } @@ -358,11 +375,11 @@ public class InternalEngineTests extends ESTestCase { // create two docs and refresh ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); Engine.Index first = new Engine.Index(newUid("1"), doc); - engine.index(first); + Engine.IndexResult firstResult = engine.index(first); ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); Engine.Index second = new Engine.Index(newUid("2"), doc2); - engine.index(second); - assertThat(second.getTranslogLocation(), greaterThan(first.getTranslogLocation())); + Engine.IndexResult secondResult = engine.index(second); + assertThat(secondResult.getTranslogLocation(), greaterThan(firstResult.getTranslogLocation())); engine.refresh("test"); segments = engine.segments(false); @@ -691,7 +708,7 @@ public class InternalEngineTests extends ESTestCase { operations.add(operation); initialEngine.index(operation); } else { - final Engine.Delete operation = new Engine.Delete("test", "1", newUid("test#1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false); + final Engine.Delete operation = new Engine.Delete("test", "1", newUid("test#1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()); operations.add(operation); initialEngine.delete(operation); } @@ -1102,133 +1119,141 @@ public class InternalEngineTests extends ESTestCase { public void testVersioningNewCreate() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED); - engine.index(create); - assertThat(create.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(create); + assertThat(indexResult.getVersion(), equalTo(1L)); - create = new Engine.Index(newUid("1"), doc, create.seqNo(), create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(create); - assertThat(create.version(), equalTo(1L)); + create = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), indexResult.getVersion(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(create); + assertThat(indexResult.getVersion(), equalTo(1L)); } public void testVersioningNewIndex() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); - index = new Engine.Index(newUid("1"), doc, index.seqNo(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(index); - assertThat(index.version(), equalTo(1L)); + index = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); } public void 
testExternalVersioningNewIndex() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - engine.index(index); - assertThat(index.version(), equalTo(12L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(12L)); - index = new Engine.Index(newUid("1"), doc, index.seqNo(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(index); - assertThat(index.version(), equalTo(12L)); + index = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(index); + assertThat(indexResult.getVersion(), equalTo(12L)); } public void testVersioningIndexConflict() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(2L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 1L, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // future versions should not work as well index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testExternalVersioningIndexConflict() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); - Engine.Index index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - engine.index(index); - assertThat(index.version(), equalTo(12L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(12L)); index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - engine.index(index); - assertThat(index.version(), equalTo(14L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(14L)); index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + 
assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); + } + + public void testForceVersioningNotAllowedExceptForOlderIndices() throws Exception { + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); + Engine.Index index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 42, VersionType.FORCE, PRIMARY, 0, -1, false); + + Engine.IndexResult indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(IllegalArgumentException.class)); + assertThat(indexResult.getFailure().getMessage(), containsString("version type [FORCE] may not be used for indices created after 6.0")); + + IndexSettings oldIndexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0_beta1) + .build()); + try (Store store = createStore(); + Engine engine = createEngine(oldIndexSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 84, VersionType.FORCE, PRIMARY, 0, -1, false); + Engine.IndexResult result = engine.index(index); + assertTrue(result.hasFailure()); + assertThat(result.getFailure(), instanceOf(IllegalArgumentException.class)); + assertThat(result.getFailure().getMessage(), containsString("version type [FORCE] may not be used for non-translog operations")); + + index = new Engine.Index(newUid("1"), doc, randomIntBetween(0, 16), 84, VersionType.FORCE, + Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, 0, -1, false); + result = engine.index(index); + assertThat(result.getVersion(), equalTo(84L)); } } public void testVersioningIndexConflictWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(2L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); engine.flush(); index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 1L, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // future versions should not work as well index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testExternalVersioningIndexConflictWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - engine.index(index); - 
assertThat(index.version(), equalTo(12L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(12L)); index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - engine.index(index); - assertThat(index.version(), equalTo(14L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(14L)); engine.flush(); index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testForceMerge() throws IOException { @@ -1329,255 +1354,203 @@ public class InternalEngineTests extends ESTestCase { public void testVersioningDeleteConflict() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(2L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); - Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 1L, VersionType.INTERNAL, PRIMARY, 0, false); - try { - engine.delete(delete); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 1L, VersionType.INTERNAL, PRIMARY, 0); + Engine.DeleteResult result = engine.delete(delete); + assertTrue(result.hasFailure()); + assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class)); // future versions should not work as well - delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 3L, VersionType.INTERNAL, PRIMARY, 0, false); - try { - engine.delete(delete); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 3L, VersionType.INTERNAL, PRIMARY, 0); + result = engine.delete(delete); + assertTrue(result.hasFailure()); + assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class)); // now actually delete - delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0, false); - engine.delete(delete); - assertThat(delete.version(), equalTo(3L)); + delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0); + result = engine.delete(delete); + assertThat(result.getVersion(), equalTo(3L)); // now check if we can index to a delete doc with version index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } - - // we shouldn't be able to 
create as well - Engine.Index create = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(create); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testVersioningDeleteConflictWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(2L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); engine.flush(); - Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 1L, VersionType.INTERNAL, PRIMARY, 0, false); - try { - engine.delete(delete); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 1L, VersionType.INTERNAL, PRIMARY, 0); + Engine.DeleteResult deleteResult = engine.delete(delete); + assertTrue(deleteResult.hasFailure()); + assertThat(deleteResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // future versions should not work as well - delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 3L, VersionType.INTERNAL, PRIMARY, 0, false); - try { - engine.delete(delete); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 3L, VersionType.INTERNAL, PRIMARY, 0); + deleteResult = engine.delete(delete); + assertTrue(deleteResult.hasFailure()); + assertThat(deleteResult.getFailure(), instanceOf(VersionConflictEngineException.class)); engine.flush(); // now actually delete - delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0, false); - engine.delete(delete); - assertThat(delete.version(), equalTo(3L)); + delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0); + deleteResult = engine.delete(delete); + assertThat(deleteResult.getVersion(), equalTo(3L)); engine.flush(); // now check if we can index to a delete doc with version index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } - - // we shouldn't be able to create as well - Engine.Index create = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(create); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), 
instanceOf(VersionConflictEngineException.class)); } public void testVersioningCreateExistsException() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index create = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); - engine.index(create); - assertThat(create.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(create); + assertThat(indexResult.getVersion(), equalTo(1L)); create = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(create); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(create); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testVersioningCreateExistsExceptionWithFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index create = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); - engine.index(create); - assertThat(create.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(create); + assertThat(indexResult.getVersion(), equalTo(1L)); engine.flush(); create = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); - try { - engine.index(create); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + indexResult = engine.index(create); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testVersioningReplicaConflict1() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(2L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); // apply the second index to the replica, should work fine - index = new Engine.Index(newUid("1"), doc, index.seqNo(), index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(index); - assertThat(index.version(), equalTo(2L)); + index = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), indexResult.getVersion(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); + long seqNo = indexResult.getSeqNo(); // now, the old one should not work - index = new Engine.Index(newUid("1"), doc, index.seqNo(), 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - - try { - replicaEngine.index(index); - fail(); - } catch (VersionConflictEngineException e) { - // all is well - } + index = new Engine.Index(newUid("1"), doc, seqNo, 1L, 
VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // second version on replica should fail as well - try { - index = new Engine.Index(newUid("1"), doc, index.seqNo(), 2L - , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(index); - assertThat(index.version(), equalTo(2L)); - } catch (VersionConflictEngineException e) { - // all is well - } + index = new Engine.Index(newUid("1"), doc, seqNo, 2L + , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testVersioningReplicaConflict2() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); // apply the first index to the replica, should work fine - index = new Engine.Index(newUid("1"), doc, index.seqNo(), 1L + index = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), 1L , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(index); - assertThat(index.version(), equalTo(1L)); + indexResult = replicaEngine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); // index it again index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertThat(index.version(), equalTo(2L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(2L)); // now delete it Engine.Delete delete = new Engine.Delete("test", "1", newUid("1")); - engine.delete(delete); - assertThat(delete.version(), equalTo(3L)); + Engine.DeleteResult deleteResult = engine.delete(delete); + assertThat(deleteResult.getVersion(), equalTo(3L)); // apply the delete on the replica (skipping the second index) - delete = new Engine.Delete("test", "1", newUid("1"), delete.seqNo(), 3L - , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, false); - replicaEngine.delete(delete); - assertThat(delete.version(), equalTo(3L)); + delete = new Engine.Delete("test", "1", newUid("1"), deleteResult.getSeqNo(), 3L + , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + deleteResult = replicaEngine.delete(delete); + assertThat(deleteResult.getVersion(), equalTo(3L)); // second time delete with same version should fail - try { - delete = new Engine.Delete("test", "1", newUid("1"), delete.seqNo(), 3L - , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, false); - replicaEngine.delete(delete); - fail("excepted VersionConflictEngineException to be thrown"); - } catch (VersionConflictEngineException e) { - // all is well - } + delete = new Engine.Delete("test", "1", newUid("1"), deleteResult.getSeqNo(), 3L + , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0); + deleteResult = replicaEngine.delete(delete); + assertTrue(deleteResult.hasFailure()); + assertThat(deleteResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // now 
do the second index on the replica, it should fail - try { - index = new Engine.Index(newUid("1"), doc, index.seqNo(), 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); - replicaEngine.index(index); - fail("excepted VersionConflictEngineException to be thrown"); - } catch (VersionConflictEngineException e) { - // all is well - } + index = new Engine.Index(newUid("1"), doc, deleteResult.getSeqNo(), 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + indexResult = replicaEngine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } public void testBasicCreatedFlag() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertTrue(index.isCreated()); + Engine.IndexResult indexResult = engine.index(index); + assertTrue(indexResult.isCreated()); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertFalse(index.isCreated()); + indexResult = engine.index(index); + assertFalse(indexResult.isCreated()); engine.delete(new Engine.Delete(null, "1", newUid("1"))); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertTrue(index.isCreated()); + indexResult = engine.index(index); + assertTrue(indexResult.isCreated()); } public void testCreatedFlagAfterFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertTrue(index.isCreated()); + Engine.IndexResult indexResult = engine.index(index); + assertTrue(indexResult.isCreated()); engine.delete(new Engine.Delete(null, "1", newUid("1"))); engine.flush(); index = new Engine.Index(newUid("1"), doc); - engine.index(index); - assertTrue(index.isCreated()); + indexResult = engine.index(index); + assertTrue(indexResult.isCreated()); } private static class MockAppender extends AbstractAppender { @@ -1610,6 +1583,7 @@ public class InternalEngineTests extends ESTestCase { public void testIndexWriterInfoStream() throws IllegalAccessException { assumeFalse("who tests the tester?", VERBOSE); MockAppender mockAppender = new MockAppender("testIndexWriterInfoStream"); + mockAppender.start(); Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); @@ -1632,10 +1606,12 @@ public class InternalEngineTests extends ESTestCase { } finally { Loggers.removeAppender(rootLogger, mockAppender); + mockAppender.stop(); Loggers.setLevel(rootLogger, savedLevel); } } + @AwaitsFix(bugUrl = "// nocommit") public void testSeqNoAndCheckpoints() throws IOException { final int opCount = randomIntBetween(1, 256); long primarySeqNo = SequenceNumbersService.NO_OPS_PERFORMED; @@ -1661,7 +1637,7 @@ public class InternalEngineTests extends ESTestCase { id = randomFrom(indexedIds); final Engine.Delete delete = new Engine.Delete( "test", id, newUid("test#" + id), SequenceNumbersService.UNASSIGNED_SEQ_NO, - rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0, false); + rarely() ? 
100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); try { initialEngine.delete(delete); indexedIds.remove(id); @@ -1865,6 +1841,7 @@ public class InternalEngineTests extends ESTestCase { public void testIndexWriterIFDInfoStream() throws IllegalAccessException { assumeFalse("who tests the tester?", VERBOSE); MockAppender mockAppender = new MockAppender("testIndexWriterIFDInfoStream"); + mockAppender.start(); final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD"); @@ -1888,6 +1865,7 @@ public class InternalEngineTests extends ESTestCase { } finally { Loggers.removeAppender(iwIFDLogger, mockAppender); + mockAppender.stop(); Loggers.setLevel(iwIFDLogger, (Level) null); } } @@ -1905,7 +1883,7 @@ public class InternalEngineTests extends ESTestCase { engine.index(new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); // Delete document we just added: - engine.delete(new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false)); + engine.delete(new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1"))); @@ -1919,31 +1897,27 @@ public class InternalEngineTests extends ESTestCase { } // Delete non-existent document - engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false)); + engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document (we never indexed uid=2): getResult = engine.get(new Engine.Get(true, newUid("2"))); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: - try { - engine.index(new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); - fail("did not hit expected exception"); - } catch (VersionConflictEngineException vcee) { - // expected - } + Engine.Index index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + Engine.IndexResult indexResult = engine.index(index); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // Get should still not find the document getResult = engine.get(new Engine.Get(true, newUid("1"))); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=2 with a too-old version, should fail: - try { - engine.index(new Engine.Index(newUid("2"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); - fail("did not hit expected exception"); - } catch (VersionConflictEngineException vcee) { - // expected - } + Engine.Index index1 = new Engine.Index(newUid("2"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, 
System.nanoTime(), -1, false); + indexResult = engine.index(index1); + assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // Get should not find the document getResult = engine.get(new Engine.Get(true, newUid("2"))); @@ -2039,8 +2013,8 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -2089,8 +2063,8 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -2114,6 +2088,7 @@ public class InternalEngineTests extends ESTestCase { return new Mapping(Version.CURRENT, root, new MetadataFieldMapper[0], emptyMap()); } + @AwaitsFix(bugUrl = "// nocommit") public void testUpgradeOldIndex() throws IOException { List<Path> indexes = new ArrayList<>(); try (DirectoryStream<Path> stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) { @@ -2135,17 +2110,18 @@ Path[] list = filterExtraFSFiles(FileSystemUtils.files(unzipDataDir)); if (list.length != 1) { - throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length + " " + Arrays.toString(list)); + throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length + + " " + Arrays.toString(list)); } + // the bwc scripts packs the indices under this path - Path src = list[0].resolve("nodes/0/indices/" + indexName); - Path translog = list[0].resolve("nodes/0/indices/" + indexName).resolve("0").resolve("translog"); - assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); + Path src = OldIndexUtils.getIndexDir(logger, indexName, indexFile.toString(), list[0]); + Path translog = src.resolve("0").resolve("translog"); assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog)); Path[] tlogFiles = filterExtraFSFiles(FileSystemUtils.files(translog)); assertEquals(Arrays.toString(tlogFiles), tlogFiles.length, 2); // ckp & tlog Path tlogFile = tlogFiles[0].getFileName().toString().endsWith("tlog") ?
tlogFiles[0] : tlogFiles[1]; - final long size = Files.size(tlogFiles[0]); + final long size = Files.size(tlogFile); logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size); Directory directory = newFSDirectory(src.resolve("0").resolve("index")); Store store = createStore(directory); @@ -2179,8 +2155,8 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < numExtraDocs; i++) { ParsedDocument doc = testParsedDocument("extra" + Integer.toString(i), "extra" + Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -2208,8 +2184,8 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -2251,17 +2227,17 @@ public class InternalEngineTests extends ESTestCase { String uuidValue = "test#" + Integer.toString(randomId); ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); if (flush) { engine.flush(); } doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(idxRequest); + Engine.IndexResult result = engine.index(idxRequest); engine.refresh("test"); - assertThat(idxRequest.version(), equalTo(2L)); + assertThat(result.getVersion(), equalTo(2L)); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); assertThat(topDocs.totalHits, equalTo(numDocs + 1)); @@ -2290,7 +2266,7 @@ public class InternalEngineTests extends ESTestCase { public static class TranslogHandler extends TranslogRecoveryPerformer { - private final DocumentMapper docMapper; + private final MapperService 
mapperService; public Mapping mappingUpdate = null; public final AtomicInteger recoveredOps = new AtomicInteger(0); @@ -2298,22 +2274,20 @@ public TranslogHandler(String indexName, Logger logger) { super(new ShardId("test", "_na_", 0), null, logger); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); - RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test"); Index index = new Index(indexName, "_na_"); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); - NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer()); - IndexAnalyzers indexAnalyzers = - new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap()); + NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, () -> null); - DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService); - this.docMapper = b.build(mapperService); + mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, () -> null); } @Override protected DocumentMapperForType docMapper(String type) { - return new DocumentMapperForType(docMapper, mappingUpdate); + RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder(type); + DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService); + return new DocumentMapperForType(b.build(mapperService), mappingUpdate); } @Override @@ -2327,8 +2301,8 @@ public class InternalEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult index = engine.index(firstIndexRequest); + assertThat(index.getVersion(), equalTo(1L)); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -2472,13 +2446,78 @@ public class InternalEngineTests extends ESTestCase { } } + public void testCheckDocumentFailure() throws Exception { + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); + Exception documentFailure = engine.checkIfDocumentFailureOrThrow(new Engine.Index(newUid("1"), doc), new IOException("simulated document failure")); + assertThat(documentFailure, instanceOf(IOException.class)); + try { + engine.checkIfDocumentFailureOrThrow(new Engine.Index(newUid("1"), doc), new CorruptIndexException("simulated environment failure", "")); + fail("expected exception to be thrown"); + } catch (Exception environmentException) { +
assertThat(environmentException.getMessage(), containsString("simulated environment failure")); + } + } + + private static class ThrowingIndexWriter extends IndexWriter { + private boolean throwDocumentFailure; + + public ThrowingIndexWriter(Directory d, IndexWriterConfig conf) throws IOException { + super(d, conf); + } + + @Override + public long addDocument(Iterable<? extends IndexableField> doc) throws IOException { + if (throwDocumentFailure) { + throw new IOException("simulated"); + } else { + return super.addDocument(doc); + } + } + + @Override + public long deleteDocuments(Term... terms) throws IOException { + if (throwDocumentFailure) { + throw new IOException("simulated"); + } else { + return super.deleteDocuments(terms); + } + } + + public void setThrowDocumentFailure(boolean throwDocumentFailure) { + this.throwDocumentFailure = throwDocumentFailure; + } + } + + public void testHandleDocumentFailure() throws Exception { + try (Store store = createStore()) { + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); + ThrowingIndexWriter throwingIndexWriter = new ThrowingIndexWriter(store.directory(), new IndexWriterConfig()); + try (Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, () -> throwingIndexWriter)) { + // test document failure while indexing + throwingIndexWriter.setThrowDocumentFailure(true); + Engine.IndexResult indexResult = engine.index(randomAppendOnly(1, doc, false)); + assertNotNull(indexResult.getFailure()); + + throwingIndexWriter.setThrowDocumentFailure(false); + indexResult = engine.index(randomAppendOnly(1, doc, false)); + assertNull(indexResult.getFailure()); + + // test document failure while deleting + throwingIndexWriter.setThrowDocumentFailure(true); + Engine.DeleteResult deleteResult = engine.delete(new Engine.Delete("test", "", newUid("1"))); + assertNotNull(deleteResult.getFailure()); + } + } + + } + public void testDocStats() throws IOException { final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } DocsStats docStats = engine.getDocStats(); assertEquals(numDocs, docStats.getCount()); @@ -2487,8 +2526,8 @@ ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(2L)); + Engine.IndexResult index = engine.index(firstIndexRequest); + assertThat(index.getVersion(), equalTo(2L)); engine.flush(); // flush - buffered deletes are not counted docStats = engine.getDocStats(); assertEquals(1,
docStats.getDeleted()); @@ -2504,25 +2543,25 @@ public class InternalEngineTests extends ESTestCase { Engine.Index operation = randomAppendOnly(1, doc, false); Engine.Index retry = randomAppendOnly(1, doc, true); if (randomBoolean()) { - engine.index(operation); + Engine.IndexResult indexResult = engine.index(operation); assertFalse(engine.indexWriterHasDeletions()); assertEquals(0, engine.getNumVersionLookups()); - assertNotNull(operation.getTranslogLocation()); - engine.index(retry); + assertNotNull(indexResult.getTranslogLocation()); + Engine.IndexResult retryResult = engine.index(retry); assertTrue(engine.indexWriterHasDeletions()); assertEquals(0, engine.getNumVersionLookups()); - assertNotNull(retry.getTranslogLocation()); - assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0); + assertNotNull(retryResult.getTranslogLocation()); + assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { - engine.index(retry); + Engine.IndexResult retryResult = engine.index(retry); assertTrue(engine.indexWriterHasDeletions()); assertEquals(0, engine.getNumVersionLookups()); - assertNotNull(retry.getTranslogLocation()); - engine.index(operation); + assertNotNull(retryResult.getTranslogLocation()); + Engine.IndexResult indexResult = engine.index(operation); assertTrue(engine.indexWriterHasDeletions()); assertEquals(0, engine.getNumVersionLookups()); - assertNotNull(retry.getTranslogLocation()); - assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0); + assertNotNull(retryResult.getTranslogLocation()); + assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); } engine.refresh("test"); @@ -2533,17 +2572,17 @@ public class InternalEngineTests extends ESTestCase { operation = randomAppendOnly(1, doc, false); retry = randomAppendOnly(1, doc, true); if (randomBoolean()) { - engine.index(operation); - assertNotNull(operation.getTranslogLocation()); - engine.index(retry); - assertNotNull(retry.getTranslogLocation()); - assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0); + Engine.IndexResult indexResult = engine.index(operation); + assertNotNull(indexResult.getTranslogLocation()); + Engine.IndexResult retryResult = engine.index(retry); + assertNotNull(retryResult.getTranslogLocation()); + assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { - engine.index(retry); - assertNotNull(retry.getTranslogLocation()); - engine.index(operation); - assertNotNull(retry.getTranslogLocation()); - assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0); + Engine.IndexResult retryResult = engine.index(retry); + assertNotNull(retryResult.getTranslogLocation()); + Engine.IndexResult indexResult = engine.index(operation); + assertNotNull(retryResult.getTranslogLocation()); + assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); } engine.refresh("test"); @@ -2561,25 +2600,26 @@ public class InternalEngineTests extends ESTestCase { long autoGeneratedIdTimestamp = 0; Engine.Index index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + Engine.IndexResult indexResult = engine.index(index); + 
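+ // the version is now read from the returned Engine.IndexResult rather than from the mutated index operation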
assertThat(indexResult.getVersion(), equalTo(1L)); - index = new Engine.Index(newUid("1"), doc, index.seqNo(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - replicaEngine.index(index); - assertThat(index.version(), equalTo(1L)); + index = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + indexResult = replicaEngine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); isRetry = true; index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - engine.index(index); - assertThat(index.version(), equalTo(1L)); + indexResult = engine.index(index); + assertThat(indexResult.getVersion(), equalTo(1L)); engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); assertEquals(1, topDocs.totalHits); } - index = new Engine.Index(newUid("1"), doc, index.seqNo(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - replicaEngine.index(index); + index = new Engine.Index(newUid("1"), doc, indexResult.getSeqNo(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + indexResult = replicaEngine.index(index); + assertThat(indexResult.hasFailure(), equalTo(false)); replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); @@ -2593,26 +2633,25 @@ public class InternalEngineTests extends ESTestCase { boolean isRetry = true; long autoGeneratedIdTimestamp = 0; - Engine.Index firstIndexRequest = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - engine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult result = engine.index(firstIndexRequest); + assertThat(result.getVersion(), equalTo(1L)); - Engine.Index firstIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.seqNo(), firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - replicaEngine.index(firstIndexRequestReplica); - assertThat(firstIndexRequestReplica.version(), equalTo(1L)); + Engine.Index firstIndexRequestReplica = new Engine.Index(newUid("1"), doc, result.getSeqNo(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.IndexResult indexReplicaResult = replicaEngine.index(firstIndexRequestReplica); + assertThat(indexReplicaResult.getVersion(), equalTo(1L)); isRetry = false; Engine.Index secondIndexRequest = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); - engine.index(secondIndexRequest); - 
assertTrue(secondIndexRequest.isCreated()); + Engine.IndexResult indexResult = engine.index(secondIndexRequest); + assertTrue(indexResult.isCreated()); engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); assertEquals(1, topDocs.totalHits); } - Engine.Index secondIndexRequestReplica = new Engine.Index(newUid("1"), doc, secondIndexRequest.seqNo(), firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index secondIndexRequestReplica = new Engine.Index(newUid("1"), doc, result.getSeqNo(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); replicaEngine.index(secondIndexRequestReplica); replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index d34ee5ccf1a..ac283a6184d 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -990,8 +990,8 @@ public class ShadowEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - primaryEngine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = primaryEngine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } DocsStats docStats = primaryEngine.getDocStats(); assertEquals(numDocs, docStats.getCount()); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java index 4e4d638d355..b1a3c9c0886 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java @@ -33,9 +33,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -/** - * - */ public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImplTestCase { @Override protected abstract String getFieldDataType(); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index 741a1b54526..13f194a23ba 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -67,8 +67,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; -/** - */ public abstract class 
AbstractStringFieldDataTestCase extends AbstractFieldDataImplTestCase { private void addField(Document d, String name, String value) { d.add(new StringField(name, value, Field.Store.YES)); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index 5f8beb12424..eb5b1f2b874 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -32,9 +32,6 @@ import org.elasticsearch.index.mapper.ParsedDocument; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class BinaryDVFieldDataTests extends AbstractFieldDataTestCase { @Override protected boolean hasDocValues() { diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java index bbc3a03f0a3..6a96fd01f0a 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java @@ -26,8 +26,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThan; -/** - */ public class FieldDataLoadingIT extends ESIntegTestCase { public void testEagerGlobalOrdinalsFieldDataLoading() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java index 25a232f68f9..a23456ce7ef 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.fielddata; -/** - */ public class PagedBytesStringFieldDataTests extends AbstractStringFieldDataTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java index e722e29bc42..1f23f726ef6 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java @@ -54,8 +54,6 @@ import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - */ public class ParentChildFieldDataTests extends AbstractFieldDataTestCase { private final String parentType = "parent"; private final String childType = "child"; diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java index 3f1e367952e..c594427b7e8 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java @@ -36,8 +36,6 @@ import java.util.Set; import static org.hamcrest.Matchers.equalTo; -/** - */ public class MultiOrdinalsTests extends ESTestCase { protected Ordinals creationMultiOrdinals(OrdinalsBuilder builder) { diff --git 
a/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java index b699bb278c9..9eb73e93324 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java @@ -31,8 +31,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; -/** - */ public class SingleOrdinalsTests extends ESTestCase { public void testSvValues() throws IOException { int numDocs = 1000000; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java index 044f74e2712..989f1fa6835 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java @@ -30,10 +30,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.BinaryFieldMapper; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -48,8 +44,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -/** - */ public class BinaryFieldMapperTests extends ESSingleNodeTestCase { @Override @@ -102,7 +96,7 @@ public class BinaryFieldMapperTests extends ESSingleNodeTestCase { BytesRef indexedValue = doc.rootDoc().getBinaryValue("field"); assertEquals(new BytesRef(value), indexedValue); FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field"); - Object originalValue = fieldMapper.fieldType().valueForSearch(indexedValue); + Object originalValue = fieldMapper.fieldType().valueForDisplay(indexedValue); assertEquals(new BytesArray(value), originalValue); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java index 4f52fcd187d..d119a27f22e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.index.mapper.BooleanFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; import org.junit.Before; public class BooleanFieldTypeTests extends FieldTypeTestCase { @@ -44,11 +42,11 @@ public class BooleanFieldTypeTests extends FieldTypeTestCase { public void testValueForSearch() { MappedFieldType ft = createDefaultFieldType(); - assertEquals(true, ft.valueForSearch("T")); - assertEquals(false, ft.valueForSearch("F")); - expectThrows(IllegalArgumentException.class, () -> ft.valueForSearch(0)); - expectThrows(IllegalArgumentException.class, 
() -> ft.valueForSearch("true")); - expectThrows(IllegalArgumentException.class, () -> ft.valueForSearch("G")); + assertEquals(true, ft.valueForDisplay("T")); + assertEquals(false, ft.valueForDisplay("F")); + expectThrows(IllegalArgumentException.class, () -> ft.valueForDisplay(0)); + expectThrows(IllegalArgumentException.class, () -> ft.valueForDisplay("true")); + expectThrows(IllegalArgumentException.class, () -> ft.valueForDisplay("G")); } public void testTermQuery() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java index d8894139cfa..7bf23fa4184 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java @@ -25,9 +25,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.test.ESSingleNodeTestCase; -/** - * - */ public class CamelCaseFieldNameTests extends ESSingleNodeTestCase { public void testCamelCaseFieldNameStaysAsIs() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java index 11f8512fd25..3cf9527c627 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java @@ -34,8 +34,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -/** - */ public class CopyToMapperIntegrationIT extends ESIntegTestCase { public void testDynamicTemplateCopyTo() throws Exception { assertAcked( diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java index cd08ba98a88..0133e7e86c5 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java @@ -40,9 +40,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; -/** - * - */ public class CopyToMapperTests extends ESSingleNodeTestCase { @SuppressWarnings("unchecked") public void testCopyToFieldsParsing() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java index 9bc87e874f9..391f987e714 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java @@ -97,7 +97,7 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() .endObject().endObject().endObject().string(); IndexService indexService = createIndex("test", BW_SETTINGS); - QueryShardContext context = indexService.newQueryShardContext(); + QueryShardContext context = indexService.newQueryShardContext(0, null, () -> 0L); DocumentMapper mapper = 
indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); DocumentFieldMappers fieldMappers = mapper.mappers(); assertThat(fieldMappers.getMapper("s_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class)); @@ -150,7 +150,7 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() .endObject().endObject().endObject().string(); IndexService indexService = createIndex("text"); - QueryShardContext context = indexService.newQueryShardContext(); + QueryShardContext context = indexService.newQueryShardContext(0, null, () -> 0L); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); DocumentFieldMappers fieldMappers = mapper.mappers(); assertThat(fieldMappers.getMapper("s_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index a80f94845d1..cf6335c808a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -25,19 +25,15 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; import org.junit.Before; import java.io.IOException; @@ -354,4 +350,39 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase { DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, defaultMapper.mappingSource().toString()); } + + /** + * Test that time zones are correctly parsed by the {@link DateFieldMapper}. + * There is a known bug with Joda 2.9.4 reported in https://github.com/JodaOrg/joda-time/issues/373. + */ + public void testTimeZoneParsing() throws Exception { + final String timeZonePattern = "yyyy-MM-dd" + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'"); + + String mapping = XContentFactory.jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "date") + .field("format", timeZonePattern) + .endObject() + .endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); + + final DateTimeZone randomTimeZone = randomBoolean() ? 
DateTimeZone.forID(randomFrom("UTC", "CET")) : randomDateTimeZone(); + final DateTime randomDate = new DateTime(2016, 03, 11, 0, 0, 0, randomTimeZone); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", DateTimeFormat.forPattern(timeZonePattern).print(randomDate)) + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + + assertEquals(randomDate.withZone(DateTimeZone.UTC).getMillis(), fields[0].numericValue().longValue()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index bdeaa6921de..12fd641724e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -28,18 +28,21 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.LegacyDateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; import org.joda.time.DateTimeZone; import org.junit.Before; @@ -49,6 +52,8 @@ public class DateFieldTypeTests extends FieldTypeTestCase { return new DateFieldMapper.DateFieldType(); } + private static long nowInMillis; + @Before public void setupProperties() { setDummyNullValue(10); @@ -64,6 +69,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { ((DateFieldType) ft).setDateTimeFormatter(Joda.forPattern("date_optional_time", Locale.CANADA)); } }); + nowInMillis = randomPositiveLong(); } public void testIsFieldWithinQueryEmptyReader() throws IOException { @@ -71,38 +77,39 @@ public class DateFieldTypeTests extends FieldTypeTestCase { DateFieldType ft = new DateFieldType(); ft.setName("my_date"); assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, null)); } private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, DateTimeZone zone, DateMathParser alternateFormat) throws IOException { + QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-06-20", - 
randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-02-12", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2014-01-02", "2015-02-12", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2016-05-11", "2016-08-30", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.WITHIN, ft.isFieldWithinQuery(reader, "2015-09-25", "2016-05-29", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.WITHIN, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", - true, true, null, null)); + true, true, null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", - false, false, null, null)); + false, false, null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", - false, true, null, null)); + false, true, null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", - true, false, null, null)); + true, false, null, null, context)); } public void testIsFieldWithinQuery() throws IOException { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); - long instant1 = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-12").getMillis(); - long instant2 = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime("2016-04-03").getMillis(); + long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-12").getMillis(); + long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2016-04-03").getMillis(); Document doc = new Document(); LongPoint field = new LongPoint("my_date", instant1); doc.add(field); @@ -112,7 +119,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { DirectoryReader reader = DirectoryReader.open(w); DateFieldType ft = new DateFieldType(); ft.setName("my_date"); - DateMathParser alternateFormat = new DateMathParser(LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER); + DateMathParser alternateFormat = new DateMathParser(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER); doTestIsFieldWithinQuery(ft, reader, null, null); doTestIsFieldWithinQuery(ft, reader, null, alternateFormat); doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null); @@ -121,13 +128,13 @@ public class DateFieldTypeTests extends FieldTypeTestCase { // Fields with no value indexed. 
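// with no values indexed for my_date2, any bounds check should come back DISJOINT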
DateFieldType ft2 = new DateFieldType(); ft2.setName("my_date2"); - assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null)); + assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, null)); IOUtils.close(reader, w, dir); } public void testValueFormat() { MappedFieldType ft = createDefaultFieldType(); - long instant = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-12T14:10:55").getMillis(); + long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-12T14:10:55").getMillis(); assertEquals("2015-10-12T14:10:55.000Z", ft.docValueFormat(null, DateTimeZone.UTC).format(instant)); assertEquals("2015-10-12T15:10:55.000+01:00", @@ -136,47 +143,58 @@ public class DateFieldTypeTests extends FieldTypeTestCase { createDefaultFieldType().docValueFormat("YYYY", DateTimeZone.UTC).format(instant)); assertEquals(instant, ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", false, null)); - assertEquals(instant, + assertEquals(instant + 999, ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", true, null)); - assertEquals(LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-13").getMillis() - 1, + assertEquals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-13").getMillis() - 1, ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12||/d", true, null)); } public void testValueForSearch() { MappedFieldType ft = createDefaultFieldType(); String date = "2015-10-12T12:09:55.000Z"; - long instant = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis(); - assertEquals(date, ft.valueForSearch(instant)); + long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis(); + assertEquals(date, ft.valueForDisplay(instant)); } public void testTermQuery() { + Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build(); + QueryShardContext context = new QueryShardContext(0, + new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), + indexSettings), + null, null, null, null, null, null, null, null, null, () -> nowInMillis); MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date = "2015-10-12T14:10:55"; - long instant = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis(); + long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis(); ft.setIndexOptions(IndexOptions.DOCS); - assertEquals(LongPoint.newExactQuery("field", instant), ft.termQuery(date, null)); + assertEquals(LongPoint.newRangeQuery("field", instant, instant + 999), ft.termQuery(date, context)); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.termQuery(date, null)); + () -> ft.termQuery(date, context)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } public void testRangeQuery() throws IOException { + Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 
1).build(); + QueryShardContext context = new QueryShardContext(0, + new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), + null, null, null, null, null, null, null, null, null, () -> nowInMillis); MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date1).getMillis(); - long instant2 = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date2).getMillis(); + long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date1).getMillis(); + long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date2).getMillis() + 999; ft.setIndexOptions(IndexOptions.DOCS); assertEquals(LongPoint.newRangeQuery("field", instant1, instant2), - ft.rangeQuery(date1, date2, true, true).rewrite(new MultiReader())); + ft.rangeQuery(date1, date2, true, true, context).rewrite(new MultiReader())); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery(date1, date2, true, true)); + () -> ft.rangeQuery(date1, date2, true, true, context)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentFieldMapperTests.java index 43fc1d5c82a..2c62a2952a8 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentFieldMapperTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import java.io.IOException; @@ -40,7 +41,7 @@ public class DocumentFieldMapperTests extends LuceneTestCase { private static class FakeAnalyzer extends Analyzer { private final String output; - + public FakeAnalyzer(String output) { this.output = output; } @@ -63,7 +64,7 @@ public class DocumentFieldMapperTests extends LuceneTestCase { }; return new TokenStreamComponents(tokenizer); } - + } static class FakeFieldType extends TermBasedFieldType { @@ -71,11 +72,11 @@ public class DocumentFieldMapperTests extends LuceneTestCase { public FakeFieldType() { super(); } - + FakeFieldType(FakeFieldType other) { super(other); } - + @Override public MappedFieldType clone() { return new FakeFieldType(this); @@ -85,7 +86,7 @@ public class DocumentFieldMapperTests extends LuceneTestCase { public String typeName() { return "fake"; } - + } static class FakeFieldMapper extends FieldMapper { @@ -104,15 +105,15 @@ public class DocumentFieldMapperTests extends LuceneTestCase { protected String contentType() { return null; } - + } public void testAnalyzers() throws IOException { FakeFieldType fieldType1 = new FakeFieldType(); fieldType1.setName("field1"); - fieldType1.setIndexAnalyzer(new NamedAnalyzer("foo", new FakeAnalyzer("index"))); - fieldType1.setSearchAnalyzer(new NamedAnalyzer("bar", new FakeAnalyzer("search"))); - fieldType1.setSearchQuoteAnalyzer(new NamedAnalyzer("baz", new FakeAnalyzer("search_quote"))); + fieldType1.setIndexAnalyzer(new 
NamedAnalyzer("foo", AnalyzerScope.INDEX, new FakeAnalyzer("index"))); + fieldType1.setSearchAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new FakeAnalyzer("search"))); + fieldType1.setSearchQuoteAnalyzer(new NamedAnalyzer("baz", AnalyzerScope.INDEX, new FakeAnalyzer("search_quote"))); FieldMapper fieldMapper1 = new FakeFieldMapper("field1", fieldType1); FakeFieldType fieldType2 = new FakeFieldType(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java index 62189aaa87a..af217030f2c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java @@ -29,13 +29,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.test.ESSingleNodeTestCase; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class DoubleIndexingDocTests extends ESSingleNodeTestCase { public void testDoubleIndexingSameDoc() throws Exception { Directory dir = newDirectory(); @@ -47,6 +45,7 @@ public class DoubleIndexingDocTests extends ESSingleNodeTestCase { IndexService index = createIndex("test"); client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get(); DocumentMapper mapper = index.mapperService().documentMapper("type"); + QueryShardContext context = index.newQueryShardContext(0, null, () -> 0L); ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() @@ -67,25 +66,25 @@ public class DoubleIndexingDocTests extends ESSingleNodeTestCase { IndexReader reader = DirectoryReader.open(writer); IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", null), 10); + TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", context), 10); assertThat(topDocs.totalHits, equalTo(2)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", null), 10); + topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", context), 10); assertThat(topDocs.totalHits, equalTo(2)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", null), 10); + topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", context), 10); assertThat(topDocs.totalHits, equalTo(2)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", null), 10); + topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", context), 10); assertThat(topDocs.totalHits, equalTo(2)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", null), 10); + topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", context), 10); assertThat(topDocs.totalHits, equalTo(2)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", null), 10); + 
topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", context), 10); assertThat(topDocs.totalHits, equalTo(2)); - topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", null), 10); + topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", context), 10); assertThat(topDocs.totalHits, equalTo(2)); writer.close(); reader.close(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java index d634d8cd4fe..604c97332b6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java @@ -29,16 +29,19 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTcpTransport; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -54,7 +57,7 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { private static ThreadPool THREAD_POOL; private ClusterService clusterService; - private LocalTransport transport; + private Transport transport; private TransportService transportService; private IndicesService indicesService; private ShardStateAction shardStateAction; @@ -75,10 +78,11 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) .build(); clusterService = createClusterService(THREAD_POOL); - transport = new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()); + transport = new MockTcpTransport(settings, THREAD_POOL, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), + new NetworkService(settings, Collections.emptyList())); transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); indicesService = getInstanceFromNode(IndicesService.class); shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL); actionFilters = new ActionFilters(Collections.emptySet()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 
9e4eb14e378..9eb5a78529f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -38,9 +38,6 @@ import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class DynamicTemplatesTests extends ESSingleNodeTestCase { public void testMatchTypeOnly() throws Exception { XContentBuilder builder = JsonXContent.contentBuilder(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 6be11ced1e6..72f9d09808f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -39,13 +40,12 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -/** - */ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { @Override @@ -61,8 +61,11 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { Collections.singletonMap(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")), Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser())); + Supplier<QueryShardContext> queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type") .startObject(ExternalMetadataMapper.CONTENT_TYPE) @@ -110,8 +113,11 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { mapperParsers.put(KeywordFieldMapper.CONTENT_TYPE, new KeywordFieldMapper.TypeParser()); MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap()); + Supplier<QueryShardContext> queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, 
queryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") @@ -180,8 +186,11 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { mapperParsers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser()); MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap()); + Supplier<QueryShardContext> queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldLevelBoostTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldLevelBoostTests.java index 2d451a36df8..42089752842 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldLevelBoostTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldLevelBoostTests.java @@ -39,8 +39,6 @@ import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -/** - */ public class FieldLevelBoostTests extends ESSingleNodeTestCase { private static final Settings BW_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0).build(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 3182e2a21f6..229c295ab19 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -38,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.SortedSet; import java.util.TreeSet; +import java.util.function.Supplier; public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { @@ -231,9 +233,12 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { Collections.singletonMap("_dummy", new DummyMetadataFieldMapper.TypeParser()) ); final MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + Supplier<QueryShardContext> queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; + MapperService mapperService = new 
MapperService(indexService.getIndexSettings(), indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService, - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 37d0436c9db..96ca2e72b95 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.test.ESTestCase; @@ -68,49 +69,49 @@ public abstract class FieldTypeTestCase extends ESTestCase { new Modifier("analyzer", false) { @Override public void modify(MappedFieldType ft) { - ft.setIndexAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + ft.setIndexAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new StandardAnalyzer())); } }, new Modifier("analyzer", false) { @Override public void modify(MappedFieldType ft) { - ft.setIndexAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + ft.setIndexAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new StandardAnalyzer())); } @Override public void normalizeOther(MappedFieldType other) { - other.setIndexAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + other.setIndexAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.INDEX, new StandardAnalyzer())); } }, new Modifier("search_analyzer", true) { @Override public void modify(MappedFieldType ft) { - ft.setSearchAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + ft.setSearchAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new StandardAnalyzer())); } }, new Modifier("search_analyzer", true) { @Override public void modify(MappedFieldType ft) { - ft.setSearchAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + ft.setSearchAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new StandardAnalyzer())); } @Override public void normalizeOther(MappedFieldType other) { - other.setSearchAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + other.setSearchAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.INDEX, new StandardAnalyzer())); } }, new Modifier("search_quote_analyzer", true) { @Override public void modify(MappedFieldType ft) { - ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new StandardAnalyzer())); } }, new Modifier("search_quote_analyzer", true) { @Override public void modify(MappedFieldType ft) { - ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", new 
StandardAnalyzer()));
+                ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", AnalyzerScope.INDEX, new StandardAnalyzer()));
             }
             @Override
             public void normalizeOther(MappedFieldType other) {
-                other.setSearchQuoteAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer()));
+                other.setSearchQuoteAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.INDEX, new StandardAnalyzer()));
             }
         },
         new Modifier("similarity", false) {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java
index 65ed7845af7..ec12a628f5f 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java
@@ -32,9 +32,6 @@ import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
 import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- *
- */
 public class GenericStoreDynamicTemplateTests extends ESSingleNodeTestCase {
     public void testSimple() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json");
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java
index 7c4acb44039..a94ff589228 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java
@@ -41,7 +41,6 @@ import org.hamcrest.CoreMatchers;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.lang.NumberFormatException;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
 import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
@@ -84,7 +83,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
         assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
         assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(stored));
         assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
-        if (indexCreatedBefore22 == true) {
+        if (indexCreatedBefore22) {
             assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
         } else {
             assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
@@ -256,85 +255,60 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .bytes());
 
-        try {
+        expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
+                .bytes()));
-        }
-
-        try {
+        expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
+                .bytes()));
-        }
-
-        try {
+        expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
+                .bytes()));
-        }
-
-        try {
+        expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
+                .bytes()));
-        }
-
-        try {
+        MapperParsingException e = expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", "-").field("lon", 1.3).endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
-            assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
-            assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
-        }
+                .bytes()));
+        assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
+        assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
 
-        try {
+        e = expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", 1.2).field("lon", "-").endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
-            assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
-            assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
-        }
+                .bytes()));
+        assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
+        assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
 
-        try {
+        e = expectThrows(MapperParsingException.class, () ->
             defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("point").field("lat", "-").field("lon", "-").endObject()
                 .endObject()
-                .bytes());
-            fail();
-        } catch (MapperParsingException e) {
-            assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
-            assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
-        }
+                .bytes()));
+        assertThat(e.getRootCause(), instanceOf(NumberFormatException.class));
+        assertThat(e.getRootCause().toString(), containsString("java.lang.NumberFormatException: For input string: \"-\""));
     }
 
     public void testNoValidateLegacyLatLonValues() throws Exception {
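Throughout these test files the patch replaces the try/catch-plus-fail() idiom with expectThrows, which fails the test automatically when nothing is thrown and hands the caught exception back for further assertions. A minimal sketch of the conversion; the thrower() call and the message are illustrative placeholders, not code from this patch:

```java
// Before: verbose, and if thrower() stops throwing, a forgotten fail() lets the test pass silently
try {
    thrower();
    fail("expected an exception");
} catch (IllegalArgumentException e) {
    assertEquals("bad argument", e.getMessage());
}

// After: expectThrows fails when no exception is thrown
// and returns the exception so assertions can follow inline
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> thrower());
assertEquals("bad argument", e.getMessage());
```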
@@ -743,92 +717,84 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
         }
 
         if (version.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
-            try {
-                String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-                    .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject()
-                    .endObject().endObject().string();
-                parser.parse("type", new CompressedXContent(normalizeMapping));
-            } catch (MapperParsingException e) {
-                assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [geohash : true]");
-            }
+            String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+                .startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject().endObject().endObject()
+                .string();
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(normalizeMapping)));
+            assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [geohash : true]");
         }
 
-        try {
-            XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("point").field("type", "geo_point");
+        {
+            XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+                .startObject("point").field("type", "geo_point");
             if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
                 xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
             }
             String validateMapping = xContentBuilder.field("validate", true).endObject().endObject().endObject().endObject().string();
-            parser.parse("type", new CompressedXContent(validateMapping));
-            fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
-        } catch (MapperParsingException e) {
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(validateMapping)));
             assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate : true]");
         }
 
-        try {
+        {
             XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("properties").startObject("point").field("type", "geo_point");
             if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
                 xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
             }
             String validateMapping = xContentBuilder.field("validate_lat", true).endObject().endObject().endObject().endObject().string();
-            parser.parse("type", new CompressedXContent(validateMapping));
-            fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
-        } catch (MapperParsingException e) {
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(validateMapping)));
             assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lat : true]");
         }
 
-        try {
+        {
             XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("properties").startObject("point").field("type", "geo_point");
             if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
                 xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
             }
             String validateMapping = xContentBuilder.field("validate_lon", true).endObject().endObject().endObject().endObject().string();
-            parser.parse("type", new CompressedXContent(validateMapping));
-            fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
-        } catch (MapperParsingException e) {
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(validateMapping)));
             assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lon : true]");
         }
 
         // test deprecated normalize
-        try {
+        {
             XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("properties").startObject("point").field("type", "geo_point");
             if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
                 xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
             }
             String normalizeMapping = xContentBuilder.field("normalize", true).endObject().endObject().endObject().endObject().string();
-            parser.parse("type", new CompressedXContent(normalizeMapping));
-            fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
-        } catch (MapperParsingException e) {
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(normalizeMapping)));
             assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize : true]");
         }
 
-        try {
+        {
             XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("properties").startObject("point").field("type", "geo_point");
             if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
                 xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
             }
             String normalizeMapping = xContentBuilder.field("normalize_lat", true).endObject().endObject().endObject().endObject().string();
-            parser.parse("type", new CompressedXContent(normalizeMapping));
-            fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
-        } catch (MapperParsingException e) {
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(normalizeMapping)));
             assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lat : true]");
         }
 
-        try {
+        {
             XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("properties").startObject("point").field("type", "geo_point");
             if (version.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
                 xContentBuilder = xContentBuilder.field("lat_lon", true).field("geohash", true);
             }
             String normalizeMapping = xContentBuilder.field("normalize_lon", true).endObject().endObject().endObject().endObject().string();
-            parser.parse("type", new CompressedXContent(normalizeMapping));
-            fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
-        } catch (MapperParsingException e) {
+            Exception e = expectThrows(MapperParsingException.class, () ->
+                parser.parse("type", new CompressedXContent(normalizeMapping)));
             assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lon : true]");
         }
     }
@@ -844,20 +810,17 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
         String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
             .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false)
             .field("geohash", false).endObject().endObject().endObject().endObject().string();
-        try {
-            mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
-            fail();
-        } catch (IllegalArgumentException e) {
-            assertThat(e.getMessage(), containsString("mapper [point] has different [lat_lon]"));
-            assertThat(e.getMessage(), containsString("mapper [point] has different [geohash]"));
-            assertThat(e.getMessage(), containsString("mapper [point] has different [geohash_precision]"));
-        }
+        Exception e = expectThrows(IllegalArgumentException.class, () ->
+            mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false));
+        assertThat(e.getMessage(), containsString("mapper [point] has different [lat_lon]"));
+        assertThat(e.getMessage(), containsString("mapper [point] has different [geohash]"));
+        assertThat(e.getMessage(), containsString("mapper [point] has different [geohash_precision]"));
 
         // correct mapping and ensure no failures
-        stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+        String stage2MappingCorrect = XContentFactory.jsonBuilder().startObject().startObject("type")
             .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true)
             .field("geohash", true).endObject().endObject().endObject().endObject().string();
-        mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("type", new CompressedXContent(stage2MappingCorrect), MapperService.MergeReason.MAPPING_UPDATE, false);
     }
 
     public void testLegacyGeoHashSearch() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java
index 1cd9063da5f..b17722eaa76 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java
@@ -31,7 +31,7 @@ public class IdFieldTypeTests extends FieldTypeTestCase {
         MappedFieldType ft = createDefaultFieldType();
         ft.setName("_id");
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-            () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean()));
+            () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null));
         assertEquals("Field [_id] of type [_id] does not support range queries", e.getMessage());
     }
 }
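The other mechanical change running through these files: MappedFieldType.rangeQuery now takes a query context as an extra trailing argument (and isFieldWithinQuery gains one as well). Field types that never consult it, and most unit tests, simply pass null. A hedged sketch of the call-shape change, with field setup elided:

```java
// Before: bounds plus the two inclusive flags
Query q = ft.rangeQuery("1", "3", true, true);

// After: a shard/rewrite context is threaded through as the last argument;
// tests that do not exercise date math or "now" just pass null
Query q2 = ft.rangeQuery("1", "3", true, true, null);
```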
InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")), - ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false)); + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery("2001:db8::", null, true, randomBoolean())); + ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery("2001:db8::", null, false, randomBoolean())); + ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")), - ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true)); + ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")), - ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false)); + ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false)); + ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null)); // Upper bound is the min IP and is not inclusive assertEquals(new MatchNoDocsQuery(), - ft.rangeQuery("::", "::", true, false)); + ft.rangeQuery("::", "::", true, false, null)); // Lower bound is the max IP and is not inclusive assertEquals(new MatchNoDocsQuery(), - ft.rangeQuery("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true)); + ft.rangeQuery("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("::", "0.0.0.0", true, false)); + ft.rangeQuery("::", "0.0.0.0", true, false, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true)); + ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null)); assertEquals( // lower bound is ipv4, upper bound is ipv6 InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")), - ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true)); + ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null)); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery("::1", "2001::", true, true)); + () -> ft.rangeQuery("::1", "2001::", true, true, null)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java 
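The same files also pick up the rename of MappedFieldType.valueForSearch to valueForDisplay, which better describes what the method does: decode an indexed representation back into a human-readable value. A sketch using the IP field type's encoded form, as in the test above (given an IP field type `ft` as there):

```java
// The index stores an encoded BytesRef; valueForDisplay decodes it back to the string form
String ip = "192.168.1.7";
BytesRef encoded = new BytesRef(InetAddressPoint.encode(InetAddresses.forString(ip)));
assertEquals(ip, ft.valueForDisplay(encoded));   // was: ft.valueForSearch(encoded)
```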
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java
index 817480db3d3..f43bf73a3d7 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java
@@ -34,9 +34,6 @@ import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- *
- */
 public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase {
     public void testMergeMultiField() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
index 2f54af2601b..fbbabf8ee3a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
@@ -47,7 +47,7 @@ public class KeywordFieldTypeTests extends FieldTypeTestCase {
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null,
             RandomStrings.randomAsciiOfLengthBetween(random(), 0, 5),
             RandomStrings.randomAsciiOfLengthBetween(random(), 0, 5),
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     public void testTermQuery() {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyByteFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyByteFieldTypeTests.java
index 1f24acd4c20..2f3a4ca6fec 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyByteFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyByteFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.index.mapper.LegacyByteFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.junit.Before;
 
 public class LegacyByteFieldTypeTests extends FieldTypeTestCase {
@@ -36,6 +34,6 @@ public void testValueForSearch() {
         MappedFieldType ft = createDefaultFieldType();
         // bytes are stored as ints
-        assertEquals(Byte.valueOf((byte) 3), ft.valueForSearch(Integer.valueOf(3)));
+        assertEquals(Byte.valueOf((byte) 3), ft.valueForDisplay(Integer.valueOf(3)));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java
index 79703986c83..19f67c488b1 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldMapperTests.java
@@ -40,12 +40,12 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.TestSearchContext;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 import org.junit.Before;
@@ -235,6 +235,12 @@ public class LegacyDateFieldMapperTests extends ESSingleNodeTestCase {
     }
 
     public void testHourFormat() throws Exception {
+        long nowInMillis = randomPositiveLong();
+        Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build();
+        QueryShardContext context = new QueryShardContext(0,
+            new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), null, null, null, null,
+            null, null, null, null, null, () -> nowInMillis);
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
             .field("date_detection", false)
             .startObject("properties").startObject("date_field").field("type", "date").field("format", "HH:mm:ss").endObject().endObject()
@@ -249,18 +255,19 @@ public class LegacyDateFieldMapperTests extends ESSingleNodeTestCase {
             .bytes());
         assertThat(((LegacyLongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(),
             equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis())));
-        LegacyNumericRangeQuery rangeQuery;
-        try {
-            SearchContext.setCurrent(new TestSearchContext(null));
-            rangeQuery = (LegacyNumericRangeQuery) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType().rangeQuery("10:00:00", "11:00:00", true, true).rewrite(null);
-        } finally {
-            SearchContext.removeCurrent();
-        }
-        assertThat(rangeQuery.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(11).millis(), DateTimeZone.UTC).getMillis()));
+        LegacyNumericRangeQuery rangeQuery = (LegacyNumericRangeQuery) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType()
+            .rangeQuery("10:00:00", "11:00:00", true, true, context);
+        assertThat(rangeQuery.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(11).millis(), DateTimeZone.UTC).getMillis() + 999));
         assertThat(rangeQuery.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis()));
     }
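Because range queries no longer pull "now" from a thread-local SearchContext, these tests construct a bare QueryShardContext whose only live dependency is the `() -> nowInMillis` supplier; every other collaborator is passed as null. This restates the construction from the hunk above, with the argument positions exactly as shown there (the nulls are services the test never touches):

```java
// Fixed clock for reproducible date math: the supplier pins "now"
long nowInMillis = randomPositiveLong();
Settings indexSettings = Settings.builder()
    .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build();
QueryShardContext context = new QueryShardContext(0,
    new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings),
    null, null, null, null, null, null, null, null, null, () -> nowInMillis);
```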
"Jan 02 11:00:00", true, true).rewrite(null); - } finally { - SearchContext.removeCurrent(); - } - assertThat(rangeQuery.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis(), DateTimeZone.UTC).getMillis())); + LegacyNumericRangeQuery rangeQuery = (LegacyNumericRangeQuery) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType() + .rangeQuery("Jan 02 10:00:00", "Jan 02 11:00:00", true, true, context); + assertThat(rangeQuery.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis() + 999, DateTimeZone.UTC).getMillis())); assertThat(rangeQuery.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis())); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java index 03137ceffd1..10a2a331a79 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java @@ -29,11 +29,10 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; -import org.elasticsearch.index.mapper.LegacyDateFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.LegacyDateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.query.QueryRewriteContext; import org.joda.time.DateTimeZone; import org.junit.Before; @@ -47,6 +46,8 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase { return new LegacyDateFieldMapper.DateFieldType(); } + private static long nowInMillis; + @Before public void setupProperties() { setDummyNullValue(10); @@ -68,6 +69,7 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase { ((LegacyDateFieldMapper.DateFieldType)ft).setTimeUnit(TimeUnit.HOURS); } }); + nowInMillis = randomPositiveLong(); } public void testIsFieldWithinQueryEmptyReader() throws IOException { @@ -75,31 +77,32 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase { DateFieldType ft = new DateFieldType(); ft.setName("my_date"); assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, null)); } private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, DateTimeZone zone, DateMathParser alternateFormat) throws IOException { + QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-06-20", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-02-12", - randomBoolean(), randomBoolean(), null, null)); + randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2014-01-02", "2015-02-12", - randomBoolean(), 
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java
index 03137ceffd1..10a2a331a79 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDateFieldTypeTests.java
@@ -29,11 +29,10 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.joda.DateMathParser;
 import org.elasticsearch.common.joda.Joda;
-import org.elasticsearch.index.mapper.LegacyDateFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.LegacyDateFieldMapper.DateFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.Relation;
 import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.query.QueryRewriteContext;
 import org.joda.time.DateTimeZone;
 import org.junit.Before;
@@ -47,6 +46,8 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase {
         return new LegacyDateFieldMapper.DateFieldType();
     }
 
+    private static long nowInMillis;
+
     @Before
     public void setupProperties() {
         setDummyNullValue(10);
@@ -68,6 +69,7 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase {
                 ((LegacyDateFieldMapper.DateFieldType)ft).setTimeUnit(TimeUnit.HOURS);
             }
         });
+        nowInMillis = randomPositiveLong();
     }
 
     public void testIsFieldWithinQueryEmptyReader() throws IOException {
@@ -75,31 +77,32 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase {
         DateFieldType ft = new DateFieldType();
         ft.setName("my_date");
         assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader,
             DateTimeZone zone, DateMathParser alternateFormat) throws IOException {
+        QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis);
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, context));
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-06-20",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, context));
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-02-12",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, context));
         assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2014-01-02", "2015-02-12",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, context));
         assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2016-05-11", "2016-08-30",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, context));
         assertEquals(Relation.WITHIN, ft.isFieldWithinQuery(reader, "2015-09-25", "2016-05-29",
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, context));
         assertEquals(Relation.WITHIN, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
-            true, true, null, null));
+            true, true, null, null, context));
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
-            false, false, null, null));
+            false, false, null, null, context));
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
-            false, true, null, null));
+            false, true, null, null, context));
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
-            true, false, null, null));
+            true, false, null, null, context));
     }
 
     public void testIsFieldWithinQuery() throws IOException {
@@ -135,7 +138,7 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase {
             createDefaultFieldType().docValueFormat("YYYY", DateTimeZone.UTC).format(instant));
         assertEquals(instant,
             ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", false, null));
-        assertEquals(instant,
+        assertEquals(instant + 999,
             ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", true, null));
         assertEquals(LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime("2015-10-13").getMillis() - 1,
             ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12||/d", true, null));
@@ -145,6 +148,6 @@ public class LegacyDateFieldTypeTests extends FieldTypeTestCase {
         MappedFieldType ft = createDefaultFieldType();
         String date = "2015-10-12T12:09:55.000Z";
         long instant = LegacyDateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis();
-        assertEquals(date, ft.valueForSearch(instant));
+        assertEquals(date, ft.valueForDisplay(instant));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDoubleFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDoubleFieldTypeTests.java
index 87feb396483..93ea0eb35fc 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyDoubleFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyDoubleFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.index.mapper.LegacyDoubleFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.LegacyDoubleFieldMapper.DoubleFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.Relation;
 import org.junit.Before;
@@ -41,11 +39,11 @@ public class LegacyDoubleFieldTypeTests extends FieldTypeTestCase {
         DoubleFieldType ft = new DoubleFieldType();
         // current impl ignores args and should always return INTERSECTS
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null, randomDouble(), randomDouble(),
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     public void testValueForSearch() {
         MappedFieldType ft = createDefaultFieldType();
-        assertEquals(Double.valueOf(1.2), ft.valueForSearch(1.2));
+        assertEquals(Double.valueOf(1.2), ft.valueForDisplay(1.2));
     }
 }
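LegacyDoubleFieldTypeTests above, and the float, integer and long variants that follow, all pin the same behavior: their isFieldWithinQuery ignores its arguments and returns Relation.INTERSECTS, the conservative answer. A rough sketch of what the three Relation values convey, matching the date assertions earlier in this section (the semantics comments are paraphrased, not quoted from the patch):

```java
// WITHIN:     every indexed value falls inside the bounds - the range check can be relaxed
// DISJOINT:   no indexed value falls inside the bounds    - the query can match nothing
// INTERSECTS: partial overlap, or simply unknown          - the query has to run as-is
assertEquals(Relation.WITHIN,   ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03", true, true, null, null, context));
assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2014-01-02", "2015-02-12", true, true, null, null, context));
// the legacy numeric field types never claim WITHIN or DISJOINT - always INTERSECTS
```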
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyFloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyFloatFieldTypeTests.java
index 17da3c88ea4..a476c81fb47 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyFloatFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyFloatFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.index.mapper.LegacyFloatFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.LegacyFloatFieldMapper.FloatFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.Relation;
 import org.junit.Before;
@@ -41,11 +39,11 @@ public class LegacyFloatFieldTypeTests extends FieldTypeTestCase {
         FloatFieldType ft = new FloatFieldType();
         // current impl ignores args and should always return INTERSECTS
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null, randomFloat(), randomFloat(),
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     public void testValueForSearch() {
         MappedFieldType ft = createDefaultFieldType();
-        assertEquals(Float.valueOf(1.2f), ft.valueForSearch(1.2f));
+        assertEquals(Float.valueOf(1.2f), ft.valueForDisplay(1.2f));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java
index 426114cb389..a4d61956a6c 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyGeohashMappingGeoPointTests.java
@@ -39,9 +39,6 @@ import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- *
- */
 public class LegacyGeohashMappingGeoPointTests extends ESSingleNodeTestCase {
 
     @Override
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyIntegerFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyIntegerFieldTypeTests.java
index e0acbcdd127..7fd6cfcfba8 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyIntegerFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyIntegerFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.index.mapper.LegacyIntegerFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.LegacyIntegerFieldMapper.IntegerFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.Relation;
 import org.junit.Before;
@@ -41,11 +39,11 @@ public class LegacyIntegerFieldTypeTests extends FieldTypeTestCase {
         IntegerFieldType ft = new IntegerFieldType();
         // current impl ignores args and should always return INTERSECTS
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null, randomInt(), randomInt(),
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     public void testValueForSearch() {
         MappedFieldType ft = createDefaultFieldType();
-        assertEquals(Integer.valueOf(3), ft.valueForSearch(Integer.valueOf(3)));
+        assertEquals(Integer.valueOf(3), ft.valueForDisplay(Integer.valueOf(3)));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyIpFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyIpFieldMapperTests.java
index af525318640..a78cb7a7177 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyIpFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyIpFieldMapperTests.java
@@ -40,9 +40,6 @@ import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- *
- */
 public class LegacyIpFieldMapperTests extends ESSingleNodeTestCase {
 
     private static final Settings BW_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build();
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyLongFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyLongFieldTypeTests.java
index 78d30462d22..2177bcff675 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyLongFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyLongFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.index.mapper.LegacyLongFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.LegacyLongFieldMapper.LongFieldType;
 import org.elasticsearch.index.mapper.MappedFieldType.Relation;
 import org.junit.Before;
@@ -41,11 +39,11 @@ public class LegacyLongFieldTypeTests extends FieldTypeTestCase {
         LongFieldType ft = new LongFieldType();
         // current impl ignores args and should always return INTERSECTS
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null, randomLong(), randomLong(),
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     public void testValueForSearch() {
         MappedFieldType ft = createDefaultFieldType();
-        assertEquals(Long.valueOf(3), ft.valueForSearch(Long.valueOf(3)));
+        assertEquals(Long.valueOf(3), ft.valueForDisplay(Long.valueOf(3)));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapperTests.java
index 46bd89f83e0..1ce13d5137a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyNumberFieldMapperTests.java
@@ -55,8 +55,6 @@ import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class LegacyNumberFieldMapperTests extends ESSingleNodeTestCase {
 
     private static final Settings BW_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build();
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyShortFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyShortFieldTypeTests.java
index be5a6a4e0c3..2e22bac6e95 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyShortFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyShortFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.index.mapper.LegacyShortFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.junit.Before;
 
 public class LegacyShortFieldTypeTests extends FieldTypeTestCase {
@@ -36,6 +34,6 @@ public void testValueForSearch() {
         MappedFieldType ft = createDefaultFieldType();
         // shorts are stored as ints
-        assertEquals(Short.valueOf((short) 3), ft.valueForSearch(Integer.valueOf(3)));
+        assertEquals(Short.valueOf((short) 3), ft.valueForDisplay(Integer.valueOf(3)));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java
index 3c07ec4b90c..a1583098292 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/LegacyStringMappingTests.java
@@ -60,8 +60,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class LegacyStringMappingTests extends ESSingleNodeTestCase {
 
     @Override
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java
new file mode 100644
index 00000000000..f4e83dde46a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+
+public class MapperTests extends ESTestCase {
+
+    public void testSuccessfulBuilderContext() {
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+        ContentPath contentPath = new ContentPath(1);
+        Mapper.BuilderContext context = new Mapper.BuilderContext(settings, contentPath);
+
+        assertEquals(settings, context.indexSettings());
+        assertEquals(contentPath, context.path());
+    }
+
+    public void testBuilderContextWithIndexSettingsAsNull() {
+        expectThrows(NullPointerException.class, () -> new Mapper.BuilderContext(null, new ContentPath(1)));
+    }
+}
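The new MapperTests file pins down Mapper.BuilderContext's small contract: it carries the index settings and the current content path to mapper builders, and the settings must not be null. A short usage sketch restating that contract with the same names as the test:

```java
// A BuilderContext simply hands back what it was constructed with
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath(1));
assertEquals(settings, context.indexSettings());

// ... and null index settings are rejected up front
expectThrows(NullPointerException.class, () -> new Mapper.BuilderContext(null, new ContentPath(1)));
```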
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java
index 4bf1995722b..5c70465d77f 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java
@@ -52,9 +52,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.notNullValue;
 
-/**
- *
- */
 public class MultiFieldTests extends ESSingleNodeTestCase {
 
     public void testMultiFieldMultiFields() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
index 8711ead6edf..1df01218a50 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
@@ -42,8 +42,6 @@ import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- */
 public class MultiFieldsIntegrationIT extends ESIntegTestCase {
     public void testMultiFields() throws Exception {
         assertAcked(
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
index 89a42a884c6..ae306009f25 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
@@ -22,13 +22,8 @@ package org.elasticsearch.index.mapper;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ObjectMapper;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.ObjectMapper.Dynamic;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 
 import java.io.IOException;
@@ -366,22 +361,16 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
         createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
 
         // explicitly setting limit to 0 prevents nested fields
-        try {
+        Exception e = expectThrows(IllegalArgumentException.class, () ->
             createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
-                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
-            fail("Expected IllegalArgumentException");
-        } catch (IllegalArgumentException e) {
-            assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded"));
-        }
+                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
+        assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded"));
 
         // setting limit to 1 with 2 nested fields fails
-        try {
+        e = expectThrows(IllegalArgumentException.class, () ->
             createIndex("test3", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build())
-                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
-            fail("Expected IllegalArgumentException");
-        } catch (IllegalArgumentException e) {
-            assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded"));
-        }
+                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
+        assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded"));
 
         MapperService mapperService = createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2)
             .build()).mapperService();
@@ -391,12 +380,9 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
         // adding new fields from different type is not ok
         String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3")
             .field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject().string();
-        try {
-            mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
-            fail("Expected IllegalArgumentException");
-        } catch (IllegalArgumentException e) {
-            assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded"));
-        }
+        e = expectThrows(IllegalArgumentException.class, () ->
+            mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false));
+        assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded"));
 
         // do not check nested fields limit if mapping is not updated
         createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
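The limit exercised here is the per-index setting exposed as MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING (its key resolves to index.mapping.nested_fields.limit), and it is checked only when a mapping update actually introduces nested fields. A condensed sketch of the failure mode, following the test above:

```java
// An index capped at 1 nested field rejects a mapping that defines 2
Exception e = expectThrows(IllegalArgumentException.class, () ->
    createIndex("test3", Settings.builder()
            .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build())
        .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded"));
```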
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java
index 774e122e13f..d48fc3c0b6c 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java
@@ -29,9 +29,6 @@ import java.io.IOException;
 
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- *
- */
 public class NullValueObjectMappingTests extends ESSingleNodeTestCase {
     public void testNullValueObject() throws IOException {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java
index 0880833716e..bc054564a68 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java
@@ -30,8 +30,6 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
 
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- */
 public class NullValueTests extends ESSingleNodeTestCase {
     public void testNullNullValue() throws Exception {
         IndexService indexService = createIndex("test", Settings.builder().build());
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java
index c887565ae69..d7e178404f1 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java
@@ -59,7 +59,7 @@ public class NumberFieldTypeTests extends FieldTypeTestCase {
         MappedFieldType ft = createDefaultFieldType();
         // current impl ignores args and should always return INTERSECTS
         assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null, randomDouble(), randomDouble(),
-            randomBoolean(), randomBoolean(), null, null));
+            randomBoolean(), randomBoolean(), null, null, null));
     }
 
     public void testTermQuery() {
@@ -78,11 +78,11 @@ public class NumberFieldTypeTests extends FieldTypeTestCase {
         MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
         ft.setName("field");
         ft.setIndexOptions(IndexOptions.DOCS);
-        assertEquals(LongPoint.newRangeQuery("field", 1, 3), ft.rangeQuery("1", "3", true, true));
+        assertEquals(LongPoint.newRangeQuery("field", 1, 3), ft.rangeQuery("1", "3", true, true, null));
 
         ft.setIndexOptions(IndexOptions.NONE);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-            () -> ft.rangeQuery("1", "3", true, true));
+            () -> ft.rangeQuery("1", "3", true, true, null));
         assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
     }
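The ParentFieldMapperTests change that follows belongs to the same API tightening seen at the top of this section: NamedAnalyzer's constructor now takes an explicit AnalyzerScope, so callers state what scope the wrapped analyzer belongs to rather than relying on a default. A minimal sketch matching the constructions in this patch:

```java
// Scope is now explicit; these tests wrap Lucene's StandardAnalyzer as an index-scoped analyzer
NamedAnalyzer named = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer());
```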
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
index 078c60ce19b..9f026c59922 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AnalyzerScope;
 import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
@@ -103,7 +104,7 @@ public class ParentFieldMapperTests extends ESSingleNodeTestCase {
     public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {
         Index index = new Index("_index", "testUUID");
         IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);
-        NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", new StandardAnalyzer());
+        NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer());
         IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer,
             Collections.emptyMap());
         SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java
index 1f6fb29e4c1..271501281cd 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java
@@ -29,9 +29,6 @@ import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-/**
- *
- */
 public class PathMapperTests extends ESSingleNodeTestCase {
     public void testPathMapping() throws IOException {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/path/test-mapping.json");
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java
index 584ba2daf1d..98056871017 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java
@@ -32,9 +32,6 @@ import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
 import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
 import static org.hamcrest.Matchers.equalTo;
 
-/**
- *
- */
 public class PathMatchDynamicTemplateTests extends ESSingleNodeTestCase {
     public void testSimple() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json");
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java
index 59571b70231..fd390c5da8a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java
@@ -38,9 +38,6 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.NumberFieldMapper;
-import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -122,7 +119,7 @@ public class ScaledFloatFieldTypeTests extends FieldTypeTestCase {
             boolean includeLower = randomBoolean();
             boolean includeUpper = randomBoolean();
             Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper);
-            Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper);
+            Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, null);
             assertEquals(searcher.count(doubleQ), searcher.count(scaledFloatQ));
         }
         IOUtils.close(reader, dir);
@@ -132,8 +129,8 @@ public class ScaledFloatFieldTypeTests extends FieldTypeTestCase {
         ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
         ft.setName("scaled_float");
         ft.setScalingFactor(0.1 + randomDouble() * 100);
-        assertNull(ft.valueForSearch(null));
-        assertEquals(10/ft.getScalingFactor(), ft.valueForSearch(10L));
+        assertNull(ft.valueForDisplay(null));
+        assertEquals(10/ft.getScalingFactor(), ft.valueForDisplay(10L));
     }
 
     public void testStats() throws IOException {
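A scaled_float is stored as a long (roughly, the value multiplied by the scaling factor and rounded), so valueForDisplay has to divide by the factor on the way back out; the assertion above checks exactly that round trip. Worked through with an assumed factor of 100 for concreteness:

```java
// With factor 100 the double 0.1 is indexed as the long 10;
// display decodes 10 back to 10 / 100.0 = 0.1
ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
ft.setName("scaled_float");
ft.setScalingFactor(100);
assertEquals(10 / ft.getScalingFactor(), ft.valueForDisplay(10L)); // 0.1
```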
createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string())); - fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null"); - } catch (TimestampParsingException e) { - assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); - } + TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService() + .documentMapperParser().parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] @@ -231,12 +216,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject(); - try { - createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string())); - fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null"); - } catch (TimestampParsingException e) { - assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); - } + TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService() + .documentMapperParser().parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null")); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] @@ -249,12 +231,9 @@ public class TimestampFieldMapperTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject(); - try { - createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping.string())); - fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set with ignore_missing set to false"); - } catch (TimestampParsingException e) { - assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false")); - } + TimestampParsingException e = expectThrows(TimestampParsingException.class, () -> createIndex("test", BW_SETTINGS).mapperService() + .documentMapperParser().parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false")); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldTypeTests.java index c17d60c8145..53c0c89f8c3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TimestampFieldTypeTests.java @@ -18,10 +18,6 @@ */ package org.elasticsearch.index.mapper; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.TimestampFieldMapper; - public class TimestampFieldTypeTests extends LegacyDateFieldTypeTests { @Override protected MappedFieldType createDefaultFieldType() { @@ -33,6 +29,6 @@ public class TimestampFieldTypeTests 
extends LegacyDateFieldTypeTests { MappedFieldType ft = createDefaultFieldType(); String date = "2015-10-12T12:09:55.000Z"; long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date).getMillis(); - assertEquals(instant, ft.valueForSearch(instant)); + assertEquals(instant, ft.valueForDisplay(instant)); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index 54217ea94ae..3c80f095f83 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -34,11 +34,12 @@ import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.TypeFieldMapper; import org.junit.Before; +import java.io.IOException; + public class TypeFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -56,20 +57,14 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { }); } - public void testTermQuery() throws Exception { + public void testTermsQuery() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); - Document doc = new Document(); - StringField type = new StringField(TypeFieldMapper.NAME, "my_type", Store.NO); - doc.add(type); - w.addDocument(doc); - w.addDocument(doc); - IndexReader reader = DirectoryReader.open(w); + IndexReader reader = openReaderWithNewType("my_type", w); TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); ft.setName(TypeFieldMapper.NAME); Query query = ft.termQuery("my_type", null); - assertEquals(new MatchAllDocsQuery(), query.rewrite(reader)); // Make sure that Lucene actually simplifies the query when there is a single type @@ -78,13 +73,55 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { Query rewritten = new IndexSearcher(reader).rewrite(filteredQuery); assertEquals(userQuery, rewritten); - type.setStringValue("my_type2"); - w.addDocument(doc); + // ... and does not rewrite it if there is more than one type reader.close(); - reader = DirectoryReader.open(w); + reader = openReaderWithNewType("my_type2", w); + Query expected = new ConstantScoreQuery( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(TypeFieldMapper.NAME, "my_type")), Occur.SHOULD) + .build() + ); + assertEquals(expected, query.rewrite(reader)); - assertEquals(new ConstantScoreQuery(new TermQuery(new Term(TypeFieldMapper.NAME, "my_type"))), query.rewrite(reader)); + BytesRef[] types = + new BytesRef[] {new BytesRef("my_type"), new BytesRef("my_type2"), new BytesRef("my_type3")}; + // the query should match all documents + query = new TypeFieldMapper.TypesQuery(types); + assertEquals(new MatchAllDocsQuery(), query.rewrite(reader)); + + reader.close(); + reader = openReaderWithNewType("unknown_type", w); + // the query cannot rewrite to a match all docs since unknown_type is not queried.
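+        // my_type3 was never indexed, so the expected rewrite below only keeps the two queried types that actually exist in the reader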
+ query = new TypeFieldMapper.TypesQuery(types); + expected = + new ConstantScoreQuery( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, types[0])), Occur.SHOULD) + .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, types[1])), Occur.SHOULD) + .build() + ); + rewritten = query.rewrite(reader); + assertEquals(expected, rewritten); + + // make sure that redundant types do not rewrite to MatchAllDocsQuery + query = new TypeFieldMapper.TypesQuery(new BytesRef("my_type"), new BytesRef("my_type"), new BytesRef("my_type")); + expected = + new ConstantScoreQuery( + new BooleanQuery.Builder() + .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, "my_type")), Occur.SHOULD) + .build() + ); + rewritten = query.rewrite(reader); + assertEquals(expected, rewritten); IOUtils.close(reader, w, dir); } + + static DirectoryReader openReaderWithNewType(String type, IndexWriter writer) throws IOException { + Document doc = new Document(); + StringField typeField = new StringField(TypeFieldMapper.NAME, type, Store.NO); + doc.add(typeField); + writer.addDocument(doc); + return DirectoryReader.open(writer); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java index 4e7b2d5fa84..803c7013d45 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java @@ -31,7 +31,7 @@ public class UidFieldTypeTests extends FieldTypeTestCase { MappedFieldType ft = createDefaultFieldType(); ft.setName("_uid"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean())); + () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null)); assertEquals("Field [_uid] of type [_uid] does not support range queries", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 5e63c3868a7..c5c77e027ac 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; @@ -80,10 +81,11 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase clauses = new ArrayList<>(); clauses.addAll(getBooleanClauses(queryBuilder.must(), BooleanClause.Occur.MUST, context)); clauses.addAll(getBooleanClauses(queryBuilder.mustNot(), BooleanClause.Occur.MUST_NOT, context)); @@ -346,9 +348,14 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase parseQuery(query, ParseFieldMatcher.STRICT)); assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + checkWarningHeaders("query malformed, empty clause found at [1:27]"); } /** diff --git a/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java index 86592847e95..a4658fbbf23 100644 ---
a/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java @@ -24,14 +24,15 @@ import org.apache.lucene.search.Query; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.Optional; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.startsWith;; +import static org.hamcrest.CoreMatchers.nullValue; public class BoostingQueryBuilderTests extends AbstractQueryTestCase { @@ -43,9 +44,9 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [1:36]"); + query = - "{ \"boosting\" : {" + - " \"positive\" : { \"match_all\" : {} }, " + - " \"negative\" : { }, " + - " \"negative_boost\" : 23.0" + - " }" + + "{ \"boosting\" : {\n" + + " \"positive\" : { \"match_all\" : {} },\n" + + " \"negative\" : { },\n" + + " \"negative_boost\" : 23.0\n" + + " }\n" + "}"; parser = XContentFactory.xContent(query).createParser(query); context = createParseContext(parser, ParseFieldMatcher.EMPTY); innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [3:20]"); + parser = XContentFactory.xContent(query).createParser(query); QueryParseContext otherContext = createParseContext(parser, ParseFieldMatcher.STRICT); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [3:20]")); + checkWarningHeaders("query malformed, empty clause found at [3:20]"); } public void testRewrite() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java index 057fcf36d72..9718ca78197 100644 --- a/core/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -99,7 +100,7 @@ public class CommonTermsQueryBuilderTests extends AbstractQueryTestCase { @@ -45,8 +46,8 @@ public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [1:40]"); parser = XContentFactory.xContent(query).createParser(query); QueryParseContext otherContext =
createParseContext(parser, ParseFieldMatcher.STRICT); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [1:40]")); + checkWarningHeaders("query malformed, empty clause found at [1:40]"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java index 8fa8724ae3c..114109736ea 100644 --- a/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java @@ -22,10 +22,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -57,8 +58,8 @@ public class DisMaxQueryBuilderTests extends AbstractQueryTestCase queries = AbstractQueryBuilder.toQueries(queryBuilder.innerQueries(), context); + protected void doAssertLuceneQuery(DisMaxQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + Collection queries = AbstractQueryBuilder.toQueries(queryBuilder.innerQueries(), context.getQueryShardContext()); assertThat(query, instanceOf(DisjunctionMaxQuery.class)); DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; assertThat(disjunctionMaxQuery.getTieBreakerMultiplier(), equalTo(queryBuilder.tieBreaker())); @@ -97,7 +98,8 @@ public class DisMaxQueryBuilderTests extends AbstractQueryTestCase fields = context.simpleMatchToIndexNames(fieldPattern); + Collection fields = context.getQueryShardContext().simpleMatchToIndexNames(fieldPattern); if (getCurrentTypes().length == 0) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query; diff --git a/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java index e6cefdb67af..924e6724062 100644 --- a/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilderTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.spans.FieldMaskingSpanQuery; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -43,16 +44,16 @@ public class FieldMaskingSpanQueryBuilderTests extends AbstractQueryTestCase { + private boolean testSkipped = false; + + /** + * All tests create deprecation warnings when a new FuzzyQueryBuilder is created.
Instead of having to check them once + * in every single test, this is done here after each test is run + */ + @After + void checkWarningHeaders() throws IOException { + // only check that warning headers got created for tests that satisfied certain assumptions and were thus not skipped + if (testSkipped == false) { + checkWarningHeaders("fuzzy query is deprecated. Instead use the [match] query with fuzziness parameter"); + } + } + @Override protected FuzzyQueryBuilder doCreateTestQueryBuilder() { FuzzyQueryBuilder query = new FuzzyQueryBuilder(STRING_FIELD_NAME, getRandomValueForFieldName(STRING_FIELD_NAME)); @@ -72,7 +89,7 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase 0); + try { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + } catch (AssumptionViolatedException e) { + // we need to know that this test was skipped in @After checkWarningHeaders(), because no warnings will be generated + testSkipped = true; + throw e; + } String query = "{\n" + " \"fuzzy\":{\n" + " \"" + STRING_FIELD_NAME + "\":{\n" + @@ -120,7 +143,13 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase 0); + try { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + } catch (AssumptionViolatedException e) { + // we need to know that this test was skipped in @After checkWarningHeaders(), because no warnings will be generated + testSkipped = true; + throw e; + } String query = "{\n" + " \"fuzzy\":{\n" + " \"" + INT_FIELD_NAME + "\":{\n" + @@ -156,7 +185,16 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json2)); assertEquals("[fuzzy] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); String shortJson = "{\n" + diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 9c9a80e63ca..71be96c1688 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.search.geo.LegacyInMemoryGeoBoundingBoxQuery; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.io.GeohashUtils; @@ -254,8 +255,9 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase } @Override - protected void doAssertLuceneQuery(Builder queryBuilder, Query query, QueryShardContext context) throws IOException { + protected void doAssertLuceneQuery(Builder queryBuilder, Query query, SearchContext context) throws IOException { if (queryBuilder.neighbors()) { assertThat(query, instanceOf(TermsQuery.class)); } else { diff --git a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java index d26dd477a85..aa0b82873fa 100644 --- a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -58,7 +59,6 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.startsWith; public class HasChildQueryBuilderTests extends AbstractQueryTestCase { protected static final String PARENT_TYPE = "parent"; @@ -120,7 +120,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>(); InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders); for (InnerHitBuilder builder : innerHitBuilders.values()) { @@ -243,7 +241,8 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [3:17]")); + checkWarningHeaders("query malformed, empty clause found at [3:17]"); } public void testToQueryInnerQueryType() throws IOException { @@ -282,7 +281,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase { protected static final String PARENT_TYPE = "parent"; @@ -100,7 +99,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>(); InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders); @@ -158,6 +156,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [3:17]"); + parser = XContentFactory.xContent(query).createParser(query); QueryParseContext otherContext = createParseContext(parser, ParseFieldMatcher.STRICT); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [3:17]")); + checkWarningHeaders("query malformed, empty clause found at [3:17]"); } public void testIgnoreUnmapped() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index 723509d775f..2ad557dcc76 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -21,11 +21,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.queries.TermsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -80,7 +81,7 @@ 
public class IdsQueryBuilderTests extends AbstractQueryTestCase } @Override - protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { + protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { if (queryBuilder.ids().size() == 0) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); } else { @@ -139,6 +140,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString)); assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage()); + checkWarningHeaders("Deprecated field [_type] used, expected [type] instead"); //array of types can also be called type rather than types final String contentString2 = "{\n" + @@ -152,5 +154,6 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString2)); assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage()); + checkWarningHeaders("Deprecated field [types] used, expected [type] instead"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java index d1c2dcee90c..3b31d17d9ba 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java @@ -20,12 +20,23 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; +import org.junit.After; import java.io.IOException; public class IndicesQueryBuilderTests extends AbstractQueryTestCase { + /** + * All tests create deprecation warnings when a new {@link IndicesQueryBuilder} is created. + * Instead of having to check them once in every single test, this is done here after each test is run + */ + @After + void checkWarningHeaders() throws IOException { + checkWarningHeaders("indices query is deprecated.
Instead search on the '_index' field"); + } + @Override protected IndicesQueryBuilder doCreateTestQueryBuilder() { String[] indices; @@ -50,12 +61,12 @@ public class IndicesQueryBuilderTests extends AbstractQueryTestCase randomIntBetween(0, 128))); - break; - case 1: - instance.setSize(randomValueOtherThan(instance.getSize(), () -> randomIntBetween(0, 128))); - break; - case 2: - instance.setExplain(!instance.isExplain()); - break; - case 3: - instance.setVersion(!instance.isVersion()); - break; - case 4: - instance.setTrackScores(!instance.isTrackScores()); - break; - case 5: - instance.setName(randomValueOtherThan(instance.getName(), () -> randomAsciiOfLengthBetween(1, 16))); - break; - case 6: - if (randomBoolean()) { - instance.setDocValueFields(randomValueOtherThan(instance.getDocValueFields(), () -> { - return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)); - })); - } else { - instance.addDocValueField(randomAsciiOfLengthBetween(1, 16)); - } - break; - case 7: - if (randomBoolean()) { - instance.setScriptFields(randomValueOtherThan(instance.getScriptFields(), () -> { - return new HashSet<>(randomListStuff(16, InnerHitBuilderTests::randomScript));})); - } else { - SearchSourceBuilder.ScriptField script = randomScript(); - instance.addScriptField(script.fieldName(), script.script()); - } - break; - case 8: - instance.setFetchSourceContext(randomValueOtherThan(instance.getFetchSourceContext(), () -> { - FetchSourceContext randomFetchSourceContext; - if (randomBoolean()) { - randomFetchSourceContext = new FetchSourceContext(randomBoolean()); - } else { - randomFetchSourceContext = new FetchSourceContext( - generateRandomStringArray(12, 16, false), - generateRandomStringArray(12, 16, false) - ); - } - return randomFetchSourceContext; + static InnerHitBuilder mutate(InnerHitBuilder original) throws IOException { + final InnerHitBuilder copy = serializedCopy(original); + List modifiers = new ArrayList<>(12); + modifiers.add(() -> copy.setFrom(randomValueOtherThan(copy.getFrom(), () -> randomIntBetween(0, 128)))); + modifiers.add(() -> copy.setSize(randomValueOtherThan(copy.getSize(), () -> randomIntBetween(0, 128)))); + modifiers.add(() -> copy.setExplain(!copy.isExplain())); + modifiers.add(() -> copy.setVersion(!copy.isVersion())); + modifiers.add(() -> copy.setTrackScores(!copy.isTrackScores())); + modifiers.add(() -> copy.setName(randomValueOtherThan(copy.getName(), () -> randomAsciiOfLengthBetween(1, 16)))); + modifiers.add(() -> { + if (randomBoolean()) { + copy.setDocValueFields(randomValueOtherThan(copy.getDocValueFields(), () -> { + return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)); })); - break; - case 9: + } else { + copy.addDocValueField(randomAsciiOfLengthBetween(1, 16)); + } + }); + modifiers.add(() -> { + if (randomBoolean()) { + copy.setScriptFields(randomValueOtherThan(copy.getScriptFields(), () -> { + return new HashSet<>(randomListStuff(16, InnerHitBuilderTests::randomScript)); + })); + } else { + SearchSourceBuilder.ScriptField script = randomScript(); + copy.addScriptField(script.fieldName(), script.script()); + } + }); + modifiers.add(() -> copy.setFetchSourceContext(randomValueOtherThan(copy.getFetchSourceContext(), () -> { + FetchSourceContext randomFetchSourceContext; + if (randomBoolean()) { + randomFetchSourceContext = new FetchSourceContext(randomBoolean()); + } else { + randomFetchSourceContext = new FetchSourceContext(true, generateRandomStringArray(12, 16, false), + generateRandomStringArray(12, 16, false)); + } + return 
randomFetchSourceContext; + }))); + modifiers.add(() -> { if (randomBoolean()) { - final List> sortBuilders = randomValueOtherThan(instance.getSorts(), () -> { + final List> sortBuilders = randomValueOtherThan(copy.getSorts(), () -> { List> builders = randomListStuff(16, () -> SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); return builders; }); - instance.setSorts(sortBuilders); + copy.setSorts(sortBuilders); } else { - instance.addSort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20))); + copy.addSort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20))); } - break; - case 10: - instance.setHighlightBuilder(randomValueOtherThan(instance.getHighlightBuilder(), - HighlightBuilderTests::randomHighlighterBuilder)); - break; - case 11: - if (instance.getStoredFieldsContext() == null || randomBoolean()) { - List previous = instance.getStoredFieldsContext() == null ? - Collections.emptyList() : instance.getStoredFieldsContext().fieldNames(); + }); + modifiers.add(() -> copy + .setHighlightBuilder(randomValueOtherThan(copy.getHighlightBuilder(), HighlightBuilderTests::randomHighlighterBuilder))); + modifiers.add(() -> { + if (copy.getStoredFieldsContext() == null || randomBoolean()) { + List previous = copy.getStoredFieldsContext() == null ? + Collections.emptyList() : copy.getStoredFieldsContext().fieldNames(); List newValues = randomValueOtherThan(previous, () -> randomListStuff(1, 16, () -> randomAsciiOfLengthBetween(1, 16))); - instance.setStoredFieldNames(newValues); + copy.setStoredFieldNames(newValues); } else { - instance.getStoredFieldsContext().addFieldName(randomAsciiOfLengthBetween(1, 16)); + copy.getStoredFieldsContext().addFieldName(randomAsciiOfLengthBetween(1, 16)); } - break; - default: - throw new IllegalStateException("unexpected surprise [" + surprise + "]"); - } - return instance; + }); + randomFrom(modifiers).run(); + return copy; } static SearchSourceBuilder.ScriptField randomScript() { - ScriptService.ScriptType randomScriptType = randomFrom(ScriptService.ScriptType.values()); - Map randomMap = null; + ScriptType randomScriptType = randomFrom(ScriptType.values()); + Map randomMap = new HashMap<>(); if (randomBoolean()) { - randomMap = new HashMap<>(); int numEntries = randomIntBetween(0, 32); for (int i = 0; i < numEntries; i++) { randomMap.put(String.valueOf(i), randomAsciiOfLength(16)); } } - Script script = new Script(randomAsciiOfLength(128), randomScriptType, randomAsciiOfLengthBetween(1, 4),randomMap); + Script script = new Script(randomScriptType, randomAsciiOfLengthBetween(1, 4), randomAsciiOfLength(128), randomMap); return new SearchSourceBuilder.ScriptField(randomAsciiOfLengthBetween(1, 32), script, randomBoolean()); } @@ -391,12 +345,7 @@ public class InnerHitBuilderTests extends ESTestCase { } private static InnerHitBuilder serializedCopy(InnerHitBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return new InnerHitBuilder(in); - } - } + return ESTestCase.copyWriteable(original, namedWriteableRegistry, InnerHitBuilder::new); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java index 9195fc83ecc..5a8597872b0 100644 --- 
a/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchAllQueryBuilderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -35,7 +36,7 @@ public class MatchAllQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); assertThat(e.getMessage(), @@ -345,6 +349,9 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); assertThat(e.getMessage(), @@ -371,6 +378,8 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); diff --git a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index eaf224b6668..c0900de4de1 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.junit.Before; @@ -243,7 +244,7 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase 0) { assertThat(query, instanceOf(BooleanQuery.class)); } else { @@ -300,6 +301,11 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase parseQuery(deprecatedJson)); assertEquals("Deprecated field [mlt] used, expected [more_like_this] instead", e.getMessage()); + + checkWarningHeaders("Deprecated field [mlt] used, expected [more_like_this] instead"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index e96c99bdcf6..3826c927e01 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -27,16 +27,17 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.all.AllTermQuery; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.index.query.MultiMatchQueryBuilder.Type; import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -142,7 +143,7 @@ public class 
MultiMatchQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>(); InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders); diff --git a/core/src/test/java/org/elasticsearch/index/query/ParentIdQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/ParentIdQueryBuilderTests.java index 750d9ce319f..893222243b6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/ParentIdQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/ParentIdQueryBuilderTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; @@ -69,7 +70,7 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase 1") ); - Map parameters = new HashMap<>(); + Map parameters = new HashMap<>(); parameters.put("param1", 5); scriptQuery( new Script( - "mygroovyscript", - ScriptType.FILE, - "groovy", + ScriptType.FILE, "groovy", "mygroovyscript", parameters) ); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java index 5568d2fa5a7..aa8541ab956 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java @@ -21,20 +21,27 @@ package org.elasticsearch.index.query; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; +import java.util.List; import java.util.Optional; import static java.util.Collections.emptyList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; public class QueryParseContextTests extends ESTestCase { @@ -45,6 +52,20 @@ public class QueryParseContextTests extends ESTestCase { indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry(); } + private ThreadContext threadContext; + + @Before + public void beforeTest() throws IOException { + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(threadContext); + } + + @After + public void teardown() throws IOException { + DeprecationLogger.removeThreadContext(this.threadContext); + this.threadContext.close(); + } + public void testParseTopLevelBuilder() throws IOException { QueryBuilder query = new MatchQueryBuilder("foo", "bar"); String requestBody = "{ \"query\" : " + query.toString() + "}"; @@ -89,6 +110,9 @@ public class 
QueryParseContextTests extends ESTestCase { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.EMPTY); Optional emptyQuery = context.parseInnerQueryBuilder(); assertFalse(emptyQuery.isPresent()); + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(1)); + assertThat(warnings, hasItem(equalTo("query malformed, empty clause found at [1:2]"))); } } @@ -107,6 +131,9 @@ public class QueryParseContextTests extends ESTestCase { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> context.parseInnerQueryBuilder()); assertEquals("query malformed, empty clause found at [1:2]", exception.getMessage()); + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(1)); + assertThat(warnings, hasItem(equalTo("query malformed, empty clause found at [1:2]"))); } source = "{ \"foo\" : \"bar\" }"; @@ -122,6 +149,9 @@ public class QueryParseContextTests extends ESTestCase { ParsingException exception = expectThrows(ParsingException.class, () -> context.parseInnerQueryBuilder()); assertEquals("no [query] registered for [foo]", exception.getMessage()); } + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(1)); + assertThat(warnings, hasItem(equalTo("query malformed, empty clause found at [1:2]"))); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java index a39fbae1764..6445ad8e7c6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -42,11 +42,12 @@ public class QueryRewriteContextTests extends ESTestCase { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) ); + final long nowInMills = randomPositiveLong(); IndicesQueriesRegistry indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry(); IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLegacyScriptLanguage).build()); QueryRewriteContext queryRewriteContext = - new QueryRewriteContext(indexSettings, null, null, indicesQueriesRegistry, null, null, null);; + new QueryRewriteContext(indexSettings, null, null, indicesQueriesRegistry, null, null, null, () -> nowInMills); // verify that the default script language in the query parse context is equal to defaultLegacyScriptLanguage variable: QueryParseContext queryParseContext = diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java index 3b3bdf31b75..29fe3af19c0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java @@ -46,9 +46,10 @@ public class QueryShardContextTests extends ESTestCase { IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.EMPTY); MapperService mapperService = mock(MapperService.class); 
when(mapperService.getIndexSettings()).thenReturn(indexSettings); + final long nowInMillis = randomPositiveLong(); QueryShardContext context = new QueryShardContext( - indexSettings, null, null, mapperService, null, null, null, null, null, null - ); + 0, indexSettings, null, null, mapperService, null, null, null, null, null, null, + () -> nowInMillis); context.setAllowUnmappedFields(false); MappedFieldType fieldType = new TextFieldMapper.TextFieldType(); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index ce49f18ccfc..893c2ec4e56 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -36,14 +36,20 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.all.AllTermQuery; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -102,9 +108,6 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase 0); + // splitOnWhitespace=false + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("foo bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(false); + Query query = queryBuilder.toQuery(createShardContext()); + BooleanQuery bq1 = + new BooleanQuery.Builder() + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "foo")), BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), BooleanClause.Occur.SHOULD)) + .build(); + List disjuncts = new ArrayList<>(); + disjuncts.add(bq1); + disjuncts.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo bar"))); + DisjunctionMaxQuery expectedQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); + assertThat(query, equalTo(expectedQuery)); + } + + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("mapped_string:other foo bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(false); + Query query = queryBuilder.toQuery(createShardContext()); + BooleanQuery bq1 = + new BooleanQuery.Builder() + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "foo")), BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), BooleanClause.Occur.SHOULD)) + .build(); + List disjuncts = new ArrayList<>(); + disjuncts.add(bq1); + disjuncts.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo bar"))); + DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); + BooleanQuery expectedQuery = + new BooleanQuery.Builder() + .add(disjunctionMaxQuery, BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "other")), BooleanClause.Occur.SHOULD) + .build(); + 
assertThat(query, equalTo(expectedQuery)); + } + + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("foo OR bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(false); + Query query = queryBuilder.toQuery(createShardContext()); + + List disjuncts1 = new ArrayList<>(); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME, "foo"))); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo"))); + DisjunctionMaxQuery maxQuery1 = new DisjunctionMaxQuery(disjuncts1, 0.0f); + + List disjuncts2 = new ArrayList<>(); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME, "bar"))); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar"))); + DisjunctionMaxQuery maxQuery2 = new DisjunctionMaxQuery(disjuncts2, 0.0f); + + BooleanQuery expectedQuery = + new BooleanQuery.Builder() + .add(new BooleanClause(maxQuery1, BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(maxQuery2, BooleanClause.Occur.SHOULD)) + .build(); + assertThat(query, equalTo(expectedQuery)); + } + + // split_on_whitespace=false breaks range query with simple syntax + { + // throws an exception when lenient is set to false + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder(">10 foo") + .field(INT_FIELD_NAME) + .splitOnWhitespace(false); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> queryBuilder.toQuery(createShardContext())); + assertThat(exc.getMessage(), equalTo("For input string: \"10 foo\"")); + } + + { + // returns an empty boolean query when lenient is set to true + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder(">10 foo") + .field(INT_FIELD_NAME) + .splitOnWhitespace(false) + .lenient(true); + Query query = queryBuilder.toQuery(createShardContext()); + BooleanQuery bq = new BooleanQuery.Builder().build(); + assertThat(bq, equalTo(query)); + } + + // splitOnWhitespace=true + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("foo bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(true); + Query query = queryBuilder.toQuery(createShardContext()); + + List disjuncts1 = new ArrayList<>(); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME, "foo"))); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo"))); + DisjunctionMaxQuery maxQuery1 = new DisjunctionMaxQuery(disjuncts1, 0.0f); + + List disjuncts2 = new ArrayList<>(); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME, "bar"))); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar"))); + DisjunctionMaxQuery maxQuery2 = new DisjunctionMaxQuery(disjuncts2, 0.0f); + + BooleanQuery expectedQuery = + new BooleanQuery.Builder() + .add(new BooleanClause(maxQuery1, BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(maxQuery2, BooleanClause.Occur.SHOULD)) + .build(); + assertThat(query, equalTo(expectedQuery)); + } + + + } + public void testFromJson() throws IOException { String json = "{\n" + @@ -543,14 +667,13 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertThat(e.getMessage(), + containsString("cannot use [all_fields] parameter in conjunction with [default_field] or [fields]")); + + String json2 = + "{\n" + + " \"query_string\" : {\n" + + " \"query\" : \"this AND that OR thus\",\n" + + " \"default_field\" : \"foo\",\n" + + " \"all_fields\" : true\n" + + " }\n" + + "}"; + + e = expectThrows(ParsingException.class, () -> parseQuery(json2)); + assertThat(e.getMessage(), + 
containsString("cannot use [all_fields] parameter in conjunction with [default_field] or [fields]")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java b/core/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java index c244273d13a..68510d4e893 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java +++ b/core/src/test/java/org/elasticsearch/index/query/RandomQueryBuilder.java @@ -19,8 +19,9 @@ package org.elasticsearch.index.query; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.elasticsearch.test.AbstractQueryTestCase; import java.util.Random; @@ -38,7 +39,7 @@ public class RandomQueryBuilder { * @return a random {@link QueryBuilder} */ public static QueryBuilder createQuery(Random r) { - switch (RandomInts.randomIntBetween(r, 0, 3)) { + switch (RandomNumbers.randomIntBetween(r, 0, 3)) { case 0: return new MatchAllQueryBuilderTests().createTestQueryBuilder(); case 1: @@ -61,7 +62,7 @@ public class RandomQueryBuilder { // for now, only use String Rangequeries for MultiTerm test, numeric and date makes little sense // see issue #12123 for discussion MultiTermQueryBuilder multiTermQueryBuilder; - switch(RandomInts.randomIntBetween(r, 0, 3)) { + switch(RandomNumbers.randomIntBetween(r, 0, 2)) { case 0: RangeQueryBuilder stringRangeQuery = new RangeQueryBuilder(AbstractQueryTestCase.STRING_FIELD_NAME); stringRangeQuery.from("a" + RandomStrings.randomAsciiOfLengthBetween(r, 1, 10)); @@ -69,19 +70,16 @@ public class RandomQueryBuilder { multiTermQueryBuilder = stringRangeQuery; break; case 1: - multiTermQueryBuilder = new FuzzyQueryBuilder(AbstractQueryTestCase.STRING_FIELD_NAME, RandomStrings.randomAsciiOfLengthBetween(r, 1, 10)); - break; - case 2: multiTermQueryBuilder = new PrefixQueryBuilderTests().createTestQueryBuilder(); break; - case 3: + case 2: multiTermQueryBuilder = new WildcardQueryBuilderTests().createTestQueryBuilder(); break; default: throw new UnsupportedOperationException(); } if (r.nextBoolean()) { - multiTermQueryBuilder.boost(2.0f / RandomInts.randomIntBetween(r, 1, 20)); + multiTermQueryBuilder.boost(2.0f / RandomNumbers.randomIntBetween(r, 1, 20)); } return multiTermQueryBuilder; } diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 5ee538126d9..60c17d88e65 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -29,8 +29,12 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.LegacyDateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -116,8 +120,9 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase 
rewrittenQuery.rewrite(null)); + expectThrows(ElasticsearchParseException.class, () -> parseQuery(invalidQuery).toQuery(createShardContext())); } public void testDateRangeBoundaries() throws IOException { @@ -269,7 +330,7 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase DISJOINT assertEquals(Relation.DISJOINT, range.getRelation(context)); diff --git a/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java index 7daed416a05..b8363cfa06d 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -76,7 +77,7 @@ public class RegexpQueryBuilderTests extends AbstractQueryTestCase params = Collections.emptyMap(); - return new ScriptQueryBuilder(new Script(script, ScriptType.INLINE, MockScriptEngine.NAME, params)); + return new ScriptQueryBuilder(new Script(ScriptType.INLINE, MockScriptEngine.NAME, script, params)); } @Override - protected void doAssertLuceneQuery(ScriptQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { + protected boolean builderGeneratesCacheableQueries() { + return false; + } + + @Override + protected void doAssertLuceneQuery(ScriptQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { assertThat(query, instanceOf(ScriptQueryBuilder.ScriptQuery.class)); + // make sure the query would not get cached + ScriptQuery sQuery = (ScriptQuery) query; + ScriptQuery clone = new ScriptQuery(sQuery.script, sQuery.searchScript); + assertFalse(sQuery.equals(clone)); + assertFalse(sQuery.hashCode() == clone.hashCode()); } public void testIllegalConstructorArg() { @@ -51,12 +63,11 @@ public class ScriptQueryBuilderTests extends AbstractQueryTestCase getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. no error is expected when //adding additional objects within the params object. 
- return Collections.singleton(Script.ScriptField.PARAMS.getPreferredName()); + return Collections.singleton(Script.PARAMS_PARSE_FIELD.getPreferredName()); + } + + @Override + protected boolean isCachable(ScriptQueryBuilder queryBuilder) { + return false; } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java index 8511ad6d9c7..253bc95ffb8 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java @@ -28,14 +28,47 @@ import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanClause; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MockFieldMapper; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.equalTo; public class SimpleQueryParserTests extends ESTestCase { + + private static IndicesQueriesRegistry indicesQueriesRegistry; + + /** + * setup for the whole base test class + */ + @BeforeClass + public static void init() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); + indicesQueriesRegistry = searchModule.getQueryParserRegistry(); + } + + @AfterClass + public static void afterClass() throws Exception { + indicesQueriesRegistry = null; + } + private static class MockSimpleQueryParser extends SimpleQueryParser { public MockSimpleQueryParser(Analyzer analyzer, Map weights, int flags, Settings settings) { super(analyzer, weights, flags, settings, null); @@ -106,4 +139,45 @@ public class SimpleQueryParserTests extends ESTestCase { } } + public void testQuoteFieldSuffix() { + SimpleQueryParser.Settings sqpSettings = new SimpleQueryParser.Settings(); + sqpSettings.quoteFieldSuffix(".quote"); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_INDEX_UUID, "some_uuid") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + IndexMetaData indexState = IndexMetaData.builder("index").settings(indexSettings).build(); + IndexSettings settings = new IndexSettings(indexState, Settings.EMPTY); + QueryShardContext mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, indicesQueriesRegistry, + null, null, null, System::currentTimeMillis) { + @Override + public MappedFieldType fieldMapper(String name) { + return new MockFieldMapper.FakeFieldType(); + } + }; + + SimpleQueryParser parser = new SimpleQueryParser(new StandardAnalyzer(), + Collections.singletonMap("foo", 1f), -1, 
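Editor's note: these hunks also track a reordering of the `Script` constructor: the inline source moves behind the type and language, that is `new Script(type, lang, idOrCode, params)` rather than `new Script(idOrCode, type, lang, params)`. A hedged sketch of the call shape as it appears in the hunks, assuming the `Script`/`ScriptType` classes from the tree being patched and a made-up language name in place of `MockScriptEngine.NAME`:

-------------------------------------
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

public class ScriptCtorExample {
    // Argument order after the change: type, language, inline code (or the
    // stored-script id), then the parameter map.
    static Script inlineScript(String code) {
        Map<String, Object> params = Collections.emptyMap();
        return new Script(ScriptType.INLINE, "mockscript", code, params);
    }
}
-------------------------------------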
sqpSettings, mockShardContext); + assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("bar")); + assertEquals(new TermQuery(new Term("foo.quote", "bar")), parser.parse("\"bar\"")); + + // Now check what happens if foo.quote does not exist + mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, indicesQueriesRegistry, + null, null, null, System::currentTimeMillis) { + @Override + public MappedFieldType fieldMapper(String name) { + if (name.equals("foo.quote")) { + return null; + } + return new MockFieldMapper.FakeFieldType(); + } + }; + parser = new SimpleQueryParser(new StandardAnalyzer(), + Collections.singletonMap("foo", 1f), -1, sqpSettings, mockShardContext); + assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("bar")); + assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("\"bar\"")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 93fbcfd930f..98c6ac00344 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -23,11 +23,17 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.TestUtil; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -38,6 +44,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -55,12 +62,6 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase flagSet = new HashSet<>(); int size = randomIntBetween(0, SimpleQueryStringFlag.values().length); @@ -108,28 +112,11 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase 1) { assertThat(query, instanceOf(BooleanQuery.class)); BooleanQuery boolQuery = (BooleanQuery) query; - if (queryBuilder.lowercaseExpandedTerms()) { - for (BooleanClause clause : boolQuery.clauses()) { - if (clause.getQuery() instanceof TermQuery) { - TermQuery inner = (TermQuery) clause.getQuery(); - assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); - } + for (BooleanClause clause : boolQuery.clauses()) { + if (clause.getQuery() instanceof TermQuery) { + TermQuery inner = (TermQuery) clause.getQuery(); + assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); } } assertThat(boolQuery.clauses().size(), equalTo(queryBuilder.fields().size())); @@ -270,7 +255,12 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase field = 
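Editor's note: the new `testQuoteFieldSuffix` pins down a field-resolution rule: a quoted phrase searches `<field><suffix>` when that subfield is mapped, and silently falls back to the bare field when it is not. A hypothetical helper, not the parser's actual code, that captures the rule the two mock contexts exercise:

-------------------------------------
import java.util.function.Predicate;

public class QuoteFieldResolver {
    // Quoted ("phrase") input prefers the suffixed field, e.g. foo.quote,
    // but only if the mapping actually contains it; otherwise the query
    // degrades gracefully to the unsuffixed field.
    static String resolve(String field, String suffix, Predicate<String> isMapped) {
        String candidate = field + suffix;
        return isMapped.test(candidate) ? candidate : field;
    }
}
-------------------------------------

With `isMapped` backed by the overridden `fieldMapper` methods above, `resolve("foo", ".quote", isMapped)` yields `foo.quote` under the first context and `foo` under the second, matching the four assertions in the test.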
queryBuilder.fields().entrySet().iterator().next(); assertTermOrBoostQuery(query, field.getKey(), queryBuilder.value(), field.getValue()); } else if (queryBuilder.fields().size() == 0) { - assertTermQuery(query, MetaData.ALL, queryBuilder.value()); + MapperService ms = context.mapperService(); + if (ms.allEnabled()) { + assertTermQuery(query, MetaData.ALL, queryBuilder.value()); + } else { + assertThat(query.getClass(), equalTo(MatchNoDocsQuery.class)); + } } else { fail("Encountered lucene query type we do not have a validation implementation for in our " + SimpleQueryStringBuilderTests.class.getSimpleName()); @@ -329,10 +319,9 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase parseQuery(json)); + assertThat(e.getMessage(), + containsString("cannot use [all_fields] parameter in conjunction with [fields]")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java index abaa35818c9..153958b9af3 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanContainingQueryBuilderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.spans.SpanContainingQuery; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -35,7 +36,7 @@ public class SpanContainingQueryBuilderTests extends AbstractQueryTestCase 0 && context.fieldMapper(DATE_FIELD_NAME) != null); + MultiTermQueryBuilder query = new MultiTermQueryBuilder() { + @Override + public Query toQuery(QueryShardContext context) throws IOException { + return new TermQuery(new Term("foo", "bar")); + } - RangeQueryBuilder query = new RangeQueryBuilder(DATE_FIELD_NAME); + @Override + public Query toFilter(QueryShardContext context) throws IOException { + return toQuery(context); + } + + @Override + public QueryBuilder queryName(String queryName) { + return this; + } + + @Override + public String queryName() { + return "foo"; + } + + @Override + public float boost() { + return 1f; + } + + @Override + public QueryBuilder boost(float boost) { + return this; + } + + @Override + public String getName() { + return "foo"; + } + + @Override + public String getWriteableName() { + return "foo"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + }; SpanMultiTermQueryBuilder spamMultiTermQuery = new SpanMultiTermQueryBuilder(query); UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> spamMultiTermQuery.toQuery(createShardContext())); diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java index 6be23412d8a..8afe80e5f71 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java @@ -22,9 +22,9 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanQuery; -import 
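Editor's note: replacing the concrete `RangeQueryBuilder` with an inline anonymous `MultiTermQueryBuilder` removes the test's dependency on a mapped date field: every method returns a fixed value, so the test can only fail for the wrapper's rejection logic, which is what it is probing. The same pattern shown against a made-up interface (`Renderer` is hypothetical, purely illustrative):

-------------------------------------
public class StubExample {
    interface Renderer {          // hypothetical stand-in for the real interface
        String name();
        String render();
    }

    // An anonymous constant-valued stub: no mappings, no I/O, no randomness,
    // so a negative test around it cannot fail for an unrelated reason.
    static final Renderer STUB = new Renderer() {
        @Override
        public String name() {
            return "foo";
        }

        @Override
        public String render() {
            return "constant";
        }
    };
}
-------------------------------------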
org.elasticsearch.Version; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -47,7 +47,7 @@ public class SpanNearQueryBuilderTests extends AbstractQueryTestCase spanQueryBuilderIterator = queryBuilder.clauses().iterator(); for (SpanQuery spanQuery : spanNearQuery.getClauses()) { - assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context))); + assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context.getQueryShardContext()))); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java index b8a9dcd2afc..961d6092d76 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.spans.SpanNotQuery; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -54,11 +55,11 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase spanQueryBuilderIterator = queryBuilder.clauses().iterator(); for (SpanQuery spanQuery : spanOrQuery.getClauses()) { - assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context))); + assertThat(spanQuery, equalTo(spanQueryBuilderIterator.next().toQuery(context.getQueryShardContext()))); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java index cfe764615c4..3ca2624205c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SpanTermQueryBuilderTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.MappedFieldType; import com.fasterxml.jackson.core.io.JsonStringEncoder; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -64,11 +65,11 @@ public class SpanTermQueryBuilderTests extends AbstractTermQueryTestCase getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. no error is expected when adding additional objects //within the params object. Score functions get parsed in the data nodes, so they are not validated in the coord node. 
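Editor's note: across the span-query tests, `doAssertLuceneQuery` now receives a `SearchContext` instead of a `QueryShardContext`, so clause builders are rebuilt through `context.getQueryShardContext()` before being compared. The shape of that loop, restated from the hunks with descriptive comments:

-------------------------------------
// Walk builder clauses and compiled clauses in lock-step: each builder,
// compiled through the shard context, must equal the corresponding Lucene
// SpanQuery inside the top-level query.
Iterator<SpanQueryBuilder> expected = queryBuilder.clauses().iterator();
for (SpanQuery actual : spanNearQuery.getClauses()) {
    assertThat(actual, equalTo(expected.next().toQuery(context.getQueryShardContext())));
}
-------------------------------------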
- return new HashSet<>(Arrays.asList(Script.ScriptField.PARAMS.getPreferredName(), ExponentialDecayFunctionBuilder.NAME, + return new HashSet<>(Arrays.asList(Script.PARAMS_PARSE_FIELD.getPreferredName(), ExponentialDecayFunctionBuilder.NAME, LinearDecayFunctionBuilder.NAME, GaussDecayFunctionBuilder.NAME)); } @@ -168,16 +169,18 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase params = Collections.emptyMap(); functionBuilder = new ScriptScoreFunctionBuilder( - new Script(script, ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, params)); + new Script(ScriptType.INLINE, MockScriptEngine.NAME, script, params)); break; case 3: RandomScoreFunctionBuilder randomScoreFunctionBuilder = new RandomScoreFunctionBuilderWithFixedSeed(); - if (randomBoolean()) { - randomScoreFunctionBuilder.seed(randomLong()); - } else if (randomBoolean()) { - randomScoreFunctionBuilder.seed(randomInt()); - } else { - randomScoreFunctionBuilder.seed(randomAsciiOfLengthBetween(1, 10)); + if (randomBoolean()) { // sometimes provide no seed + if (randomBoolean()) { + randomScoreFunctionBuilder.seed(randomLong()); + } else if (randomBoolean()) { + randomScoreFunctionBuilder.seed(randomInt()); + } else { + randomScoreFunctionBuilder.seed(randomAsciiOfLengthBetween(1, 10)); + } } functionBuilder = randomScoreFunctionBuilder; break; @@ -238,7 +241,7 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase { throw new UnsupportedOperationException(); }); } //see #11120 diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 8ba0fd53d53..2425552c246 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -26,13 +26,11 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction.ReplicaResponse; -import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -41,7 +39,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.Uid; @@ -68,6 +65,8 @@ import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary; +import static 
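Editor's note: the nested `randomBoolean()` calls in the random-score branch are deliberate: the outer coin flip sometimes leaves the seed unset entirely, so the default-seed code path also gets coverage. The idiom in isolation, against a hypothetical builder surface that mirrors the overloads used in the hunk:

-------------------------------------
import java.util.Random;

public class MaybeSeed {
    interface Seedable {                 // hypothetical builder surface
        void seed(long seed);
        void seed(int seed);
        void seed(String seed);
    }

    static void maybeSeed(Random r, Seedable builder) {
        if (r.nextBoolean()) {           // other half of the time: no seed at all
            if (r.nextBoolean()) {
                builder.seed(r.nextLong());
            } else if (r.nextBoolean()) {
                builder.seed(r.nextInt());
            } else {
                builder.seed(Long.toString(r.nextLong())); // string-typed seed
            }
        }
    }
}
-------------------------------------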
org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnReplica; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -92,7 +91,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } protected DiscoveryNode getDiscoveryNode(String id) { - return new DiscoveryNode(id, id, new LocalTransportAddress(id), Collections.emptyMap(), + return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT); } @@ -427,17 +426,32 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest request) throws Exception { - TransportWriteAction.WriteResult result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary, - null); + final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, + null); + if (indexResult.hasFailure() == false) { + // update the version on request so it will happen on the replicas + final long version = indexResult.getVersion(); + request.version(version); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + request.seqNo(indexResult.getSeqNo()); + assert request.versionType().validateVersionForWrites(request.version()); + } request.primaryTerm(primary.getPrimaryTerm()); - TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.getLocation(), logger); - return new PrimaryResult(request, result.getResponse()); + TransportWriteActionTestHelper.performPostWriteActions(primary, request, indexResult.getTranslogLocation(), logger); + IndexResponse response = new IndexResponse( + primary.shardId(), + request.type(), + request.id(), + indexResult.getSeqNo(), + indexResult.getVersion(), + indexResult.isCreated()); + return new PrimaryResult(request, response); } @Override protected void performOnReplica(IndexRequest request, IndexShard replica) { - Engine.Index index = TransportIndexAction.executeIndexRequestOnReplica(request, replica); - TransportWriteActionTestHelper.performPostWriteActions(replica, request, index.getTranslogLocation(), logger); + final Engine.IndexResult result = executeIndexRequestOnReplica(request, replica); + TransportWriteActionTestHelper.performPostWriteActions(replica, request, result.getTranslogLocation(), logger); } } @@ -460,4 +474,5 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase replica.updateGlobalCheckpointOnReplica(request.getCheckpoint()); } } + } diff --git a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 884e1d45829..7d7b7a4cd6e 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -22,15 +22,16 @@ package org.elasticsearch.index.search; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import 
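Editor's note: the reworked `performOnPrimary` makes the replication contract explicit: after a successful primary write, the version and sequence number from the `Engine.IndexResult` are copied back onto the request so replicas replay exactly the same operation instead of re-deriving those values. Condensed from the hunk, with comments:

-------------------------------------
// Primary side: a successful result pins version/seqNo on the request, and
// the version type is converted to its replication/recovery form so the
// replica applies the value verbatim rather than re-validating user intent.
if (indexResult.hasFailure() == false) {
    request.version(indexResult.getVersion());
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
    request.seqNo(indexResult.getSeqNo());
}
request.primaryTerm(primary.getPrimaryTerm());
-------------------------------------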
org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperService; @@ -45,6 +46,8 @@ import java.io.IOException; import java.util.Arrays; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class MultiMatchQueryTests extends ESSingleNodeTestCase { @@ -75,7 +78,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { } public void testCrossFieldMultiMatchQuery() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext(); + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }); queryShardContext.setAllowUnmappedFields(true); Query parsedQuery = multiMatchQuery("banon").field("name.first", 2).field("name.last", 3).field("foobar").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -98,7 +102,9 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {2, 3}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -112,7 +118,9 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {200, 30}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -129,7 +137,9 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz") }; float[] boosts = new float[] {2}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -151,7 +161,19 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { .add(expectedClause1, 
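Editor's note: `newQueryShardContext` now takes a shard id, an optional reader, and an explicit `now` supplier. Passing a supplier that throws is the interesting trick: any code path that secretly resolves the current time fails loudly instead of producing a flaky, wall-clock-dependent query. The call shape as it appears throughout these hunks:

-------------------------------------
// A "now" supplier that refuses to answer: these queries must not depend on
// the current time, and if one ever does, the test fails immediately.
QueryShardContext ctx = indexService.newQueryShardContext(
        randomInt(20),                                   // shard id
        null,                                            // no IndexReader needed
        () -> { throw new UnsupportedOperationException(); });
-------------------------------------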
Occur.SHOULD) .add(expectedClause2, Occur.SHOULD) .build(); - Query actual = MultiMatchQuery.blendTerm(new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } + + public void testMultiMatchPrefixWithAllField() throws IOException { + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }); + queryShardContext.setAllowUnmappedFields(true); + Query parsedQuery = + multiMatchQuery("foo").field("_all").type(MultiMatchQueryBuilder.Type.PHRASE_PREFIX).toQuery(queryShardContext); + assertThat(parsedQuery, instanceOf(MultiPhrasePrefixQuery.class)); + assertThat(parsedQuery.toString(), equalTo("_all:\"foo*\"")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java index 1af1b55a6ed..08b16d3400d 100644 --- a/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; -import static org.hamcrest.Matchers.is; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; +import static org.hamcrest.Matchers.is; public class GeoPointParsingTests extends ESTestCase { static double TOLERANCE = 1E-5; @@ -112,13 +112,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } public void testInvalidPointLatHashMix() throws IOException { @@ -130,12 +125,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); } public void testInvalidPointLonHashMix() throws IOException { @@ -147,12 +138,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + 
assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); } public void testInvalidField() throws IOException { @@ -164,12 +151,8 @@ public class GeoPointParsingTests extends ESTestCase { XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes()); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } private static XContentParser objectLatLon(double lat, double lon) throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 59c0e4a3bfc..c09b2face7e 100644 --- a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.search.geo; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; @@ -33,6 +31,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.ESTestCase; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; import java.io.IOException; @@ -439,12 +439,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("geohash", 1.0).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("geohash must be a string")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), containsString("geohash must be a string")); } public void testParseGeoPointLatNoLon() throws IOException { @@ -452,12 +448,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field [lon] missing")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field [lon] missing")); } public void testParseGeoPointLonNoLat() throws IOException { @@ -465,12 +457,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lon", lon).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - 
GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field [lat] missing")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field [lat] missing")); } public void testParseGeoPointLonWrongType() throws IOException { @@ -478,12 +466,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", false).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("longitude must be a number")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("longitude must be a number")); } public void testParseGeoPointLatWrongType() throws IOException { @@ -491,12 +475,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", false).field("lon", lon).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("latitude must be a number")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("latitude must be a number")); } public void testParseGeoPointExtraField() throws IOException { @@ -505,12 +485,8 @@ public class GeoUtilsTests extends ESTestCase { BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("foo", true).endObject().bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } public void testParseGeoPointLonLatGeoHash() throws IOException { @@ -521,12 +497,8 @@ public class GeoUtilsTests extends ESTestCase { .bytes(); XContentParser parser = XContentHelper.createParser(jsonBytes); parser.nextToken(); - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash")); } public void testParseGeoPointArrayTooManyValues() throws IOException { @@ -539,12 +511,8 @@ public class GeoUtilsTests extends ESTestCase { while (parser.currentToken() != Token.START_ARRAY) { parser.nextToken(); } - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("only two 
values allowed")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("only two values allowed")); } public void testParseGeoPointArrayWrongType() throws IOException { @@ -555,12 +523,8 @@ public class GeoUtilsTests extends ESTestCase { while (parser.currentToken() != Token.START_ARRAY) { parser.nextToken(); } - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("numeric value expected")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("numeric value expected")); } public void testParseGeoPointInvalidType() throws IOException { @@ -569,12 +533,8 @@ public class GeoUtilsTests extends ESTestCase { while (parser.currentToken() != Token.VALUE_NUMBER) { parser.nextToken(); } - try { - GeoUtils.parseGeoPoint(parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("geo_point expected")); - } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertThat(e.getMessage(), is("geo_point expected")); } public void testPrefixTreeCellSizes() { diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index 5b5b24bbe4b..4d73ce48a2e 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -51,8 +51,6 @@ import java.util.List; import static org.hamcrest.Matchers.equalTo; -/** - */ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldDataTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index 2dc87594c9d..1dc982270f7 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -43,8 +43,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - */ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index fcecee82ed8..4262b959099 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -43,8 +43,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - */ public class FloatNestedSortingTests extends DoubleNestedSortingTests { @Override diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java index 39994b0c057..c8f2a5f9dfa 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java @@ -26,8 +26,6 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource; import org.elasticsearch.search.MultiValueMode; -/** - */ public class LongNestedSortingTests extends AbstractNumberNestedSortingTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index dd46c4a61db..b0999b2fa33 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -58,8 +58,6 @@ import java.util.List; import static org.hamcrest.Matchers.equalTo; -/** - */ public class NestedSortingTests extends AbstractFieldDataTestCase { @Override protected String getFieldDataType() { diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 246ecdc154b..510371c2d51 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -60,7 +60,7 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase { transport = new CapturingTransport(); clusterService = createClusterService(threadPool); transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); @@ -101,9 +101,9 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, id); final GlobalCheckpointSyncAction.PrimaryRequest primaryRequest = new GlobalCheckpointSyncAction.PrimaryRequest(shardId); if (randomBoolean()) { - action.shardOperationOnPrimary(primaryRequest); + action.shardOperationOnPrimary(primaryRequest, indexShard); } else { - action.shardOperationOnReplica(new GlobalCheckpointSyncAction.ReplicaRequest(primaryRequest, randomPositiveLong())); + action.shardOperationOnReplica(new GlobalCheckpointSyncAction.ReplicaRequest(primaryRequest, randomPositiveLong()), indexShard); } verify(translog).sync(); diff --git a/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java b/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java index c723538c837..6fd66f3fb73 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/CommitPointsTests.java @@ -29,9 +29,6 @@ import java.util.ArrayList; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class CommitPointsTests extends ESTestCase { private final Logger logger = Loggers.getLogger(CommitPointsTests.class); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java index cfadab6efb8..1600d22efd7 100644 --- 
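Editor's note: in the `GlobalCheckpointSyncActionTests` hunk above, `shardOperationOnPrimary` and `shardOperationOnReplica` now take the target `IndexShard` explicitly, which is what lets the test hand in a shard wired to a mocked translog. A sketch of that assertion style with Mockito, which the `verify(translog).sync()` call already implies; `shardWithTranslog` is a hypothetical helper:

-------------------------------------
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

// Arrange: a shard whose translog is a mock we can interrogate afterwards.
Translog translog = mock(Translog.class);
IndexShard indexShard = shardWithTranslog(translog);  // hypothetical helper

// Act: drive either the primary or the replica path with the shard passed in.
action.shardOperationOnPrimary(primaryRequest, indexShard);

// Assert: the sync action's entire job is to fsync the translog once.
verify(translog).sync();
-------------------------------------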
a/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java @@ -44,8 +44,6 @@ import java.util.Collections; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; -/** - */ public class IndexSearcherWrapperTests extends ESTestCase { public void testReaderCloseListenerIsCalled() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 852cc81c505..e75261326bb 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -42,7 +42,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.env.Environment; @@ -420,7 +419,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { IndexingOperationListener listener = new IndexingOperationListener() { @Override - public void postIndex(Engine.Index index, boolean created) { + public void postIndex(Engine.Index index, Engine.IndexResult result) { try { assertNotNull(shardRef.get()); // this is all IMC needs to do - check current memory and refresh @@ -434,7 +433,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { @Override - public void postDelete(Engine.Delete delete) { + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { try { assertNotNull(shardRef.get()); // this is all IMC needs to do - check current memory and refresh @@ -459,7 +458,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { public static final IndexShard recoverShard(IndexShard newShard) throws IOException { - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 3ab191f8106..52861f85def 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -117,6 +116,7 @@ import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import 
static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; +import static org.elasticsearch.common.lucene.Lucene.readScoreDoc; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; @@ -424,7 +424,7 @@ public class IndexShardTests extends IndexShardTestCase { flushShard(shard); final IndexShard newShard = reinitShard(shard); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); @@ -472,8 +472,6 @@ public class IndexShardTests extends IndexShardTestCase { throw new RuntimeException(ex); } } - - ; }; thread[i].start(); } @@ -570,11 +568,15 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postIndex(Engine.Index index, boolean created) { - if (created) { - postIndexCreate.incrementAndGet(); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + if (result.hasFailure() == false) { + if (result.isCreated()) { + postIndexCreate.incrementAndGet(); + } else { + postIndexUpdate.incrementAndGet(); + } } else { - postIndexUpdate.incrementAndGet(); + postIndex(index, result.getFailure()); } } @@ -590,8 +592,12 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postDelete(Engine.Delete delete) { - postDelete.incrementAndGet(); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + if (result.hasFailure() == false) { + postDelete.incrementAndGet(); + } else { + postDelete(delete, result.getFailure()); + } } @Override @@ -866,7 +872,7 @@ public class IndexShardTests extends IndexShardTestCase { translogOps = 0; } IndexShard newShard = reinitShard(shard); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); assertEquals(translogOps, newShard.recoveryState().getTranslog().recoveredOperations()); @@ -889,7 +895,7 @@ public class IndexShardTests extends IndexShardTestCase { ShardRoutingHelper.initWithSameId(shardRouting, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE) ); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations()); @@ -914,7 +920,7 @@ public class IndexShardTests extends IndexShardTestCase { cleanLuceneIndex(store.directory()); store.decRef(); IndexShard newShard = reinitShard(shard); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode 
= new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); ShardRouting routing = newShard.routingEntry(); newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); try { @@ -988,7 +994,7 @@ public class IndexShardTests extends IndexShardTestCase { Store sourceStore = source.store(); Store targetStore = target.store(); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override @@ -1133,7 +1139,7 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postIndex(Engine.Index index, boolean created) { + public void postIndex(Engine.Index index, Engine.IndexResult result) { postIndex.incrementAndGet(); } @@ -1144,7 +1150,7 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postDelete(Engine.Delete delete) { + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { postDelete.incrementAndGet(); } @@ -1176,6 +1182,7 @@ public class IndexShardTests extends IndexShardTestCase { throw new RuntimeException("boom"); } + @Override public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { return searcher; } @@ -1227,7 +1234,7 @@ public class IndexShardTests extends IndexShardTestCase { IndexShard shard = newStartedShard(true); indexDoc(shard, "type", "0"); shard = reinitShard(shard); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); shard.markAsRecovering("for testing", new RecoveryState(shard.routingEntry(), localNode, null)); // Shard is still inactive since we haven't started recovering yet assertFalse(shard.isActive()); @@ -1254,7 +1261,7 @@ public class IndexShardTests extends IndexShardTestCase { indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); // Shard is still inactive since we haven't started recovering yet assertFalse(replica.isActive()); @@ -1302,7 +1309,7 @@ public class IndexShardTests extends IndexShardTestCase { ShardRoutingState.INITIALIZING, RecoverySource.LocalShardsRecoverySource.INSTANCE); final IndexShard targetShard; - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Map requestedMappingUpdates = ConcurrentCollections.newConcurrentMap(); { targetShard = newShard(targetRouting); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java 
b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index d1cf8b32f58..c28e34d9ca4 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import org.apache.lucene.index.Term; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -46,8 +47,12 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postIndex(Engine.Index index, boolean created) { - postIndex.incrementAndGet(); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + if (result.hasFailure() == false) { + postIndex.incrementAndGet(); + } else { + postIndex(index, result.getFailure()); + } } @Override @@ -62,8 +67,12 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postDelete(Engine.Delete delete) { - postDelete.incrementAndGet(); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + if (result.hasFailure() == false) { + postDelete.incrementAndGet(); + } else { + postDelete(delete, result.getFailure()); + } } @Override @@ -79,12 +88,14 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postIndex(Engine.Index index, boolean created) { - throw new RuntimeException(); } + public void postIndex(Engine.Index index, Engine.IndexResult result) { + throw new RuntimeException(); + } @Override public void postIndex(Engine.Index index, Exception ex) { - throw new RuntimeException(); } + throw new RuntimeException(); + } @Override public Engine.Delete preDelete(Engine.Delete delete) { @@ -92,8 +103,9 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postDelete(Engine.Delete delete) { - throw new RuntimeException(); } + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + throw new RuntimeException(); + } @Override public void postDelete(Engine.Delete delete, Exception ex) { @@ -111,7 +123,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ IndexingOperationListener.CompositeListener compositeListener = new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", "1")); Engine.Index index = new Engine.Index(new Term("_uid", "1"), null); - compositeListener.postDelete(delete); + compositeListener.postDelete(delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); @@ -135,7 +147,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(index, false); + compositeListener.postIndex(index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 8147b140805..01914a93805 100644 --- 
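Editor's note: the listener API change is the thread running through the shard tests above. `postIndex` and `postDelete` now receive the full `Engine.IndexResult`/`Engine.DeleteResult`, and a result carrying a failure is expected to be routed to the exception-flavoured hook by the implementor. A sketch of a conforming listener, with method shapes taken from the hunks:

-------------------------------------
IndexingOperationListener listener = new IndexingOperationListener() {
    @Override
    public void postIndex(Engine.Index index, Engine.IndexResult result) {
        if (result.hasFailure()) {
            postIndex(index, result.getFailure());  // delegate failures
        } else if (result.isCreated()) {
            // fresh document: result.getVersion(), result.getSeqNo() available
        } else {
            // update of an existing document
        }
    }

    @Override
    public void postIndex(Engine.Index index, Exception ex) {
        // count or log the failure; never throw from a listener
    }
};
-------------------------------------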
a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -137,7 +137,7 @@ public class RefreshListenersTests extends ESTestCase { public void testTooMany() throws Exception { assertFalse(listeners.refreshNeeded()); - Engine.Index index = index("1"); + Engine.IndexResult index = index("1"); // Fill the listener slots List nonForcedListeners = new ArrayList<>(maxListeners); @@ -168,7 +168,7 @@ public class RefreshListenersTests extends ESTestCase { } public void testAfterRefresh() throws Exception { - Engine.Index index = index("1"); + Engine.IndexResult index = index("1"); engine.refresh("I said so"); if (randomBoolean()) { index(randomFrom("1" /* same document */, "2" /* different document */)); @@ -198,7 +198,7 @@ public class RefreshListenersTests extends ESTestCase { refresher.start(); try { for (int i = 0; i < 1000; i++) { - Engine.Index index = index("1"); + Engine.IndexResult index = index("1"); DummyRefreshListener listener = new DummyRefreshListener(); boolean immediate = listeners.addOrNotify(index.getTranslogLocation(), listener); if (immediate) { @@ -234,8 +234,8 @@ public class RefreshListenersTests extends ESTestCase { for (int iteration = 1; iteration <= 50; iteration++) { try { String testFieldValue = String.format(Locale.ROOT, "%s%04d", threadId, iteration); - Engine.Index index = index(threadId, testFieldValue); - assertEquals(iteration, index.version()); + Engine.IndexResult index = index(threadId, testFieldValue); + assertEquals(iteration, index.getVersion()); DummyRefreshListener listener = new DummyRefreshListener(); listeners.addOrNotify(index.getTranslogLocation(), listener); @@ -245,7 +245,7 @@ public class RefreshListenersTests extends ESTestCase { } listener.assertNoError(); - Engine.Get get = new Engine.Get(false, index.uid()); + Engine.Get get = new Engine.Get(false, new Term("_uid", "test:"+threadId)); try (Engine.GetResult getResult = engine.get(get)) { assertTrue("document not found", getResult.exists()); assertEquals(iteration, getResult.version()); @@ -267,11 +267,11 @@ public class RefreshListenersTests extends ESTestCase { refresher.cancel(); } - private Engine.Index index(String id) { + private Engine.IndexResult index(String id) { return index(id, "test"); } - private Engine.Index index(String id, String testFieldValue) { + private Engine.IndexResult index(String id, String testFieldValue) { String type = "test"; String uid = type + ":" + id; Document document = new Document(); @@ -284,8 +284,7 @@ public class RefreshListenersTests extends ESTestCase { BytesReference source = new BytesArray(new byte[] { 1 }); ParsedDocument doc = new ParsedDocument(versionField, seqNoField, id, type, null, -1, -1, Arrays.asList(document), source, null); Engine.Index index = new Engine.Index(new Term("_uid", uid), doc); - engine.index(index); - return index; + return engine.index(index); } private static class DummyRefreshListener implements Consumer { diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index f3433a9669a..bcb29b66148 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -34,8 +34,6 @@ import java.nio.file.Path; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -/** - */ public class ShardPathTests extends 
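Editor's note: in `RefreshListenersTests`, `Engine#index` now returns an `Engine.IndexResult` instead of mutating the passed-in operation, so the test helpers return that result and read the version and translog location from it. The resulting call pattern, with `uidTerm`/`parsedDoc` standing in for the fixtures built in the test:

-------------------------------------
// Index and capture the result; the operation object itself is no longer
// the carrier of post-write state.
Engine.IndexResult result = engine.index(new Engine.Index(uidTerm, parsedDoc));

// Version and translog location now live on the result:
listeners.addOrNotify(result.getTranslogLocation(), refreshListener);
assertEquals(expectedVersion, result.getVersion());
-------------------------------------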
ESTestCase { public void testLoadShardPath() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { @@ -66,9 +64,8 @@ public class ShardPathTests extends ESTestCase { assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); int id = randomIntBetween(1, 10); ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), paths); - ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { + Exception e = expectThrows(IllegalStateException.class, () -> + ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings))); assertThat(e.getMessage(), containsString("more than one shard state found")); } } @@ -83,9 +80,8 @@ public class ShardPathTests extends ESTestCase { Path path = randomFrom(paths); int id = randomIntBetween(1, 10); ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), path); - ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { + Exception e = expectThrows(IllegalStateException.class, () -> + ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings))); assertThat(e.getMessage(), containsString("expected: foobar on shard path")); } } @@ -93,12 +89,8 @@ public class ShardPathTests extends ESTestCase { public void testIllegalCustomDataPath() { Index index = new Index("foo", "foo"); final Path path = createTempDir().resolve(index.getUUID()).resolve("0"); - try { - new ShardPath(true, path, path, new ShardId(index, 0)); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths")); - } + Exception e = expectThrows(IllegalArgumentException.class, () -> new ShardPath(true, path, path, new ShardId(index, 0))); + assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths")); } public void testValidCtor() { diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java index abaebb88c5e..5968ff25e98 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java @@ -37,8 +37,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -/** - */ public class FileInfoTests extends ESTestCase { public void testToFromXContent() throws IOException { final int iters = scaledRandomIntBetween(1, 10); diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java index 4bd8ba9cb3e..74b61047ace 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -18,7 +18,7 @@ */ 
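The ShardPathTests hunks above are part of a broader cleanup that replaces the try/fail/catch idiom with `expectThrows`. Here is a minimal sketch of the two styles side by side; `loadInvalidShardPath()` is a hypothetical stand-in for a failing call such as `ShardPath.loadShardPath(...)`, and `expectThrows` is the helper inherited from Lucene's `LuceneTestCase`, which `ESTestCase` extends:

```java
import static org.hamcrest.Matchers.containsString;

import org.apache.lucene.util.LuceneTestCase;

public class ExpectThrowsExampleTests extends LuceneTestCase {

    // Hypothetical stand-in for a call that must fail on bad on-disk state.
    private static void loadInvalidShardPath() {
        throw new IllegalStateException("more than one shard state found");
    }

    // Old pattern: verbose, and if fail() is forgotten the test silently passes.
    public void testOldStyle() {
        try {
            loadInvalidShardPath();
            fail("Expected IllegalStateException");
        } catch (IllegalStateException e) {
            assertThat(e.getMessage(), containsString("more than one shard state found"));
        }
    }

    // New pattern: expectThrows asserts the exception type, fails loudly if
    // nothing is thrown, and returns the exception for further checks.
    public void testNewStyle() {
        Exception e = expectThrows(IllegalStateException.class, () -> loadInvalidShardPath());
        assertThat(e.getMessage(), containsString("more than one shard state found"));
    }
}
```

Besides being shorter, the new form cannot accidentally pass when no exception is thrown, and the message assertion reads in a straight line after the capture.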
package org.elasticsearch.index.snapshots.blobstore; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; @@ -111,7 +111,7 @@ public class SlicedInputStreamTests extends ESTestCase { } private byte[] randomBytes(Random random) { - int length = RandomInts.randomIntBetween(random, 1, 10); + int length = RandomNumbers.randomIntBetween(random, 1, 10); byte[] data = new byte[length]; random.nextBytes(data); return data; diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index a996c9f4bd8..d1be0d77613 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -60,8 +60,8 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -178,7 +178,8 @@ public class CorruptedFileIT extends ESIntegTestCase { .timeout("5m") // sometimes due to cluster rebalancing and random settings default timeout is just not enough. .waitForNoRelocatingShards(true)).actionGet(); if (health.isTimedOut()) { - logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false)); } assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -284,7 +285,8 @@ public class CorruptedFileIT extends ESIntegTestCase { .health(Requests.clusterHealthRequest("test")).get(); if (response.getStatus() != ClusterHealthStatus.RED) { logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed); - logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); } assertThat(response.getStatus(), is(ClusterHealthStatus.RED)); ClusterState state = client().admin().cluster().prepareState().get().getState(); @@ -445,7 +447,8 @@ public class CorruptedFileIT extends ESIntegTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest("test").waitForGreenStatus()).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("ensureGreen timed out, cluster state:\n{}\n{}", +
client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false)); } // we are green so primaries got not corrupted. diff --git a/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java index fe5410ccaf8..3837f4faa85 100644 --- a/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java @@ -41,8 +41,6 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Locale; -/** - */ public class IndexStoreTests extends ESTestCase { public void testStoreDirectory() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java index cb0397990ab..925bf56fe70 100644 --- a/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java +++ b/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java @@ -47,8 +47,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -/** - */ @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class SuggestStatsIT extends ESIntegTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 1fab2a38086..8682d8127ae 100644 --- a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -23,10 +23,10 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.InvalidIndexNameException; @@ -34,6 +34,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.Collection; @@ -47,19 +49,17 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -/** - * - */ public class IndexActionIT extends ESIntegTestCase { /** * This test tries to simulate load while creating an index and indexing documents * while the index is being created. 
*/ + + @TestLogging("_root:DEBUG,org.elasticsearch.index.shard.IndexShard:TRACE,org.elasticsearch.action.search:TRACE") public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { @@ -69,7 +69,7 @@ public class IndexActionIT extends ESIntegTestCase { logger.info("indexing [{}] docs", numOfDocs); List<IndexRequestBuilder> builders = new ArrayList<>(numOfDocs); for (int j = 0; j < numOfDocs; j++) { - builders.add(client().prepareIndex("test", "type").setSource("field", "value")); + builders.add(client().prepareIndex("test", "type").setSource("field", "value_" + j)); } indexRandom(true, builders); logger.info("verifying indexed content"); @@ -77,7 +77,13 @@ public class IndexActionIT extends ESIntegTestCase { for (int j = 0; j < numOfChecks; j++) { try { logger.debug("running search with all types"); - assertHitCount(client().prepareSearch("test").get(), numOfDocs); + SearchResponse response = client().prepareSearch("test").get(); + if (response.getHits().totalHits() != numOfDocs) { + final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } } catch (Exception e) { logger.error("search for all docs types failed", e); if (firstError == null) { @@ -86,7 +92,13 @@ public class IndexActionIT extends ESIntegTestCase { } try { logger.debug("running search with a specific type"); - assertHitCount(client().prepareSearch("test").setTypes("type").get(), numOfDocs); + SearchResponse response = client().prepareSearch("test").setTypes("type").get(); + if (response.getHits().totalHits() != numOfDocs) { + final String message = "Count is " + response.getHits().totalHits() + " but " + numOfDocs + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. 
search response: \n{}", message, response); + fail(message); + } } catch (Exception e) { logger.error("search for all docs of a specific type failed", e); if (firstError == null) { diff --git a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 27f3bfb123f..db03e51d4d9 100644 --- a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -52,9 +52,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -/** - * - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexLifecycleActionIT extends ESIntegTestCase { public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java index e91ed066cc6..82482b1f821 100644 --- a/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -20,11 +20,14 @@ package org.elasticsearch.indices; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -45,6 +48,12 @@ public class DateMathIndexExpressionsIntegrationIT extends ESIntegTestCase { String index3 = ".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now.minusDays(2)); createIndex(index1, index2, index3); + GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(index1, index2, index3).get(); + assertEquals(index1, getSettingsResponse.getSetting(index1, IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(index2, getSettingsResponse.getSetting(index2, IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(index3, getSettingsResponse.getSetting(index3, IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + + String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; @@ -69,6 +78,17 @@ public class DateMathIndexExpressionsIntegrationIT extends ESIntegTestCase { assertThat(getResponse.isExists(), is(true)); assertThat(getResponse.getId(), equalTo("3")); + MultiGetResponse mgetResponse = client().prepareMultiGet() + .add(dateMathExp1, "type", "1") + .add(dateMathExp2, "type", "2") + .add(dateMathExp3, "type", "3").get(); + assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(mgetResponse.getResponses()[0].getResponse().getId(), equalTo("1")); + assertThat(mgetResponse.getResponses()[1].getResponse().isExists(), is(true)); + assertThat(mgetResponse.getResponses()[1].getResponse().getId(), equalTo("2")); + assertThat(mgetResponse.getResponses()[2].getResponse().isExists(), 
is(true)); + assertThat(mgetResponse.getResponses()[2].getResponse().getId(), equalTo("3")); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3).get(); assertThat(indicesStatsResponse.getIndex(index1), notNullValue()); assertThat(indicesStatsResponse.getIndex(index2), notNullValue()); @@ -122,6 +142,12 @@ public class DateMathIndexExpressionsIntegrationIT extends ESIntegTestCase { String dateMathExp3 = "<.marvel-{now/d-2d}>"; createIndex(dateMathExp1, dateMathExp2, dateMathExp3); + + GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(index1, index2, index3).get(); + assertEquals(dateMathExp1, getSettingsResponse.getSetting(index1, IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(dateMathExp2, getSettingsResponse.getSetting(index2, IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(dateMathExp3, getSettingsResponse.getSetting(index3, IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); assertThat(clusterState.metaData().index(index1), notNullValue()); assertThat(clusterState.metaData().index(index2), notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 2d0e4a3aeb9..e9a717f6636 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.IndexService; @@ -447,7 +446,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { try { assertEquals(0, imc.availableShards().size()); ShardRouting routing = newShard.routingEntry(); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertEquals(1, imc.availableShards().size()); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 215022bcb33..46cd30112b5 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -26,10 +26,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.NodeServicesProvider; 
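The DateMathIndexExpressionsIntegrationIT additions above pair each date-math expression, such as `<.marvel-{now/d-1d}>`, with the concrete daily index it should resolve to. A small sketch of that naming convention, built with the same Joda pattern the test uses; the actual resolution happens server-side (in the index-name resolution code, `IndexNameExpressionResolver`), so this only illustrates the expected mapping:

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class DateMathNamingExample {
    public static void main(String[] args) {
        // Same pattern the test uses to build the concrete index names.
        DateTimeFormatter daily = DateTimeFormat.forPattern("YYYY.MM.dd");
        DateTime now = new DateTime(DateTimeZone.UTC);

        // <.marvel-{now/d}>    -> today's index
        // <.marvel-{now/d-1d}> -> yesterday's index
        // <.marvel-{now/d-2d}> -> the day before
        for (int daysAgo = 0; daysAgo <= 2; daysAgo++) {
            String expression = "<.marvel-{now/d" + (daysAgo == 0 ? "" : "-" + daysAgo + "d") + "}>";
            String resolved = ".marvel-" + daily.print(now.minusDays(daysAgo));
            System.out.println(expression + " resolves to " + resolved);
        }
    }
}
```

Because the expression is rounded to a day (`now/d`), every request issued on the same UTC day resolves to the same index, which is what lets the test create the indices by concrete name and then address them through the expressions.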
import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -95,8 +93,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas }; indicesService.deleteIndex(idx, "simon says"); try { - NodeServicesProvider nodeServicesProvider = getInstanceFromNode(NodeServicesProvider.class); - IndexService index = indicesService.createIndex(nodeServicesProvider, metaData, Arrays.asList(countingListener), s -> {}); + IndexService index = indicesService.createIndex(metaData, Arrays.asList(countingListener), s -> {}); idx = index.index(); ShardRouting newRouting = shardRouting; String nodeId = newRouting.currentNodeId(); @@ -106,7 +103,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); IndexShard shard = index.createShard(newRouting); shard.updateRoutingEntry(newRouting); - final DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), + final DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); shard.markAsRecovering("store", new RecoveryState(newRouting, localNode, null)); shard.recoverFromStore(); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 13dad84eb9b..2734fd78284 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -45,7 +45,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -444,21 +443,15 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { public void testAllMissingStrict() throws Exception { createIndex("test1"); - try { + expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("test2") .setQuery(matchAllQuery()) - .execute().actionGet(); - fail("Exception should have been thrown."); - } catch (IndexNotFoundException e) { - } + .execute().actionGet()); - try { + expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("test2","test3") .setQuery(matchAllQuery()) - .execute().actionGet(); - fail("Exception should have been thrown."); - } catch (IndexNotFoundException e) { - } + .execute().actionGet()); //you should still be able to run empty searches without things blowing up client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 7b9f2dbb7cf..1e97d4dd57b 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -19,9 +19,11 @@ package org.elasticsearch.indices; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import 
org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -30,10 +32,13 @@ import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.chrono.ISOChronology; +import org.joda.time.format.DateTimeFormat; import java.util.List; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; +import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; @@ -357,7 +362,7 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { public void testCanCache() throws Exception { assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "s", "type=date") .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS, - 5, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + 2, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .get()); indexRandom(true, client().prepareIndex("index", "type", "1").setRouting("1").setSource("s", "2016-03-19"), client().prepareIndex("index", "type", "2").setRouting("1").setSource("s", "2016-03-20"), @@ -406,15 +411,88 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), equalTo(0L)); - // If size > 1 and cache flag is set on the request we should cache - final SearchResponse r4 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1) - .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get(); + // If the request has an aggregation containing now we should not cache + final SearchResponse r4 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))).get(); assertSearchResponse(r4); assertThat(r4.getHits().getTotalHits(), equalTo(7L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), equalTo(0L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(5L)); + equalTo(0L)); + + // If the request has an aggregation containing now we should not cache + final SearchResponse r5 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")).get(); + assertSearchResponse(r5); + assertThat(r5.getHits().getTotalHits(), equalTo(7L)); + 
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + // If size > 1 and cache flag is set on the request we should cache + final SearchResponse r6 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1) + .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get(); + assertSearchResponse(r6); + assertThat(r6.getHits().getTotalHits(), equalTo(7L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(2L)); + } + + public void testCacheWithFilteredAlias() { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "created_at", "type=date") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS, + 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .addAlias(new Alias("last_week").filter(QueryBuilders.rangeQuery("created_at").gte("now-7d/d"))) + .get()); + DateTime now = new DateTime(DateTimeZone.UTC); + client().prepareIndex("index", "type", "1").setRouting("1").setSource("created_at", + DateTimeFormat.forPattern("YYYY-MM-dd").print(now)).get(); + refresh(); + + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(2L)); 
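The new testCacheWithFilteredAlias follows a fixed rhythm: run a search, then assert the shard request cache's hit and miss counters, so the first occurrence of a request registers a miss and an identical repeat registers a hit (presumably the `now-7d/d` rounding makes the rewritten filter deterministic within the day, keeping the cache key stable). The inline stats chain is long, so here is a sketch of a small helper that names the pattern; `RequestCacheStatsHelper` is hypothetical and not part of this change:

```java
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.cache.request.RequestCacheStats;

// Hypothetical helper capturing the assertion pattern repeated inline above:
// after every search, read the shard request cache counters and compare them
// against the expected hit/miss totals.
public final class RequestCacheStatsHelper {

    private RequestCacheStatsHelper() {}

    static RequestCacheStats requestCacheStats(Client client, String index) {
        IndicesStatsResponse stats =
                client.admin().indices().prepareStats(index).setRequestCache(true).get();
        return stats.getTotal().getRequestCache();
    }

    static void assertCacheCounts(Client client, String index, long expectedHits, long expectedMisses) {
        RequestCacheStats cache = requestCacheStats(client, index);
        if (cache.getHitCount() != expectedHits || cache.getMissCount() != expectedMisses) {
            throw new AssertionError("expected hits=" + expectedHits + " misses=" + expectedMisses
                    + " but got hits=" + cache.getHitCount() + " misses=" + cache.getMissCount());
        }
    }
}
```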
+ + r1 = client().prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(2L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(2L)); } } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index 8bb8a4ddf8a..5d5584a156f 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -43,6 +43,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; public class IndicesRequestCacheTests extends ESTestCase { @@ -59,23 +60,25 @@ public class IndicesRequestCacheTests extends ESTestCase { AtomicBoolean indexShard = new AtomicBoolean(true); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); + BytesReference value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(entity.loadedFromCache()); + assertFalse(loader.loadedFromCache); assertEquals(1, cache.count()); // cache hit - entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + entity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(reader, 0); + value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(1, cache.count()); assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); assertEquals(1, cache.numRegisteredCloseListeners()); @@ -91,7 +94,7 @@ public class IndicesRequestCacheTests extends ESTestCase { assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - 
assertTrue(entity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); @@ -114,46 +117,50 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); + BytesReference value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(entity.loadedFromCache()); + assertFalse(loader.loadedFromCache); assertEquals(1, cache.count()); assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); final int cacheSize = requestCacheStats.stats().getMemorySize().bytesAsInt(); assertEquals(1, cache.numRegisteredCloseListeners()); // cache the second - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); - value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(secondReader, 0); + value = cache.getOrCompute(entity, loader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(secondEntity.loadedFromCache()); + assertFalse(loader.loadedFromCache); assertEquals(2, cache.count()); assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > cacheSize + value.length()); assertEquals(2, cache.numRegisteredCloseListeners()); - secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); - value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + secondEntity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(secondReader, 0); + value = cache.getOrCompute(secondEntity, loader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(secondEntity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(2, cache.count()); - entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + entity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(reader, 0); + value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(2, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); + 
assertTrue(loader.loadedFromCache); assertEquals(2, cache.count()); // Closing the cache doesn't change returned entities @@ -161,8 +168,8 @@ public class IndicesRequestCacheTests extends ESTestCase { cache.cleanCache(); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); - assertTrue(secondEntity.loadedFromCache()); + assertTrue(loader.loadedFromCache); + assertTrue(loader.loadedFromCache); assertEquals(1, cache.count()); assertEquals(cacheSize, requestCacheStats.stats().getMemorySize().bytesAsInt()); assertEquals(1, cache.numRegisteredCloseListeners()); @@ -178,8 +185,8 @@ public class IndicesRequestCacheTests extends ESTestCase { cache.cleanCache(); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); - assertTrue(secondEntity.loadedFromCache()); + assertTrue(loader.loadedFromCache); + assertTrue(loader.loadedFromCache); assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); @@ -200,16 +207,18 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); - - BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + Loader secondLoader = new Loader(secondReader, 0); + + BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.streamInput().readString()); size = requestCacheStats.stats().getMemorySize(); IOUtils.close(reader, secondReader, writer, dir, cache); @@ -226,24 +235,27 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, 
"baz")); DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, indexShard, 0); + TestEntity thirddEntity = new TestEntity(requestCacheStats, indexShard); + Loader thirdLoader = new Loader(thirdReader, 0); - BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.streamInput().readString()); logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); - BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.streamInput().readString()); assertEquals(2, cache.count()); assertEquals(1, requestCacheStats.stats().getEvictions()); @@ -262,25 +274,28 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); AtomicBoolean differentIdentity = new AtomicBoolean(true); - TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, differentIdentity, 0); + TestEntity thirddEntity = new TestEntity(requestCacheStats, differentIdentity); + Loader thirdLoader = new Loader(thirdReader, 0); - BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.streamInput().readString()); logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); - BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.streamInput().readString()); assertEquals(3, cache.count()); final long hitCount = 
requestCacheStats.stats().getHitCount(); @@ -289,7 +304,7 @@ public class IndicesRequestCacheTests extends ESTestCase { cache.cleanCache(); assertEquals(1, cache.count()); // third has not been validated since it's a different identity - value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes()); assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount()); assertEquals("baz", value3.streamInput().readString()); @@ -303,20 +318,39 @@ public class IndicesRequestCacheTests extends ESTestCase { StringField.TYPE_STORED)); } + private static class Loader implements Supplier<BytesReference> { + + private final DirectoryReader reader; + private final int id; + public boolean loadedFromCache = true; + + public Loader(DirectoryReader reader, int id) { + super(); + this.reader = reader; + this.id = id; + } + + @Override + public BytesReference get() { + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(new TermQuery(new Term("id", Integer.toString(id))), 1); + assertEquals(1, topDocs.totalHits); + Document document = reader.document(topDocs.scoreDocs[0].doc); + out.writeString(document.get("value")); + loadedFromCache = false; + return out.bytes(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + } + private class TestEntity extends AbstractIndexShardCacheEntity { private final AtomicBoolean standInForIndexShard; private final ShardRequestCache shardRequestCache; - private TestEntity(ShardRequestCache shardRequestCache, DirectoryReader reader, AtomicBoolean standInForIndexShard, int id) { - super(new Loader() { - @Override - public void load(StreamOutput out) throws IOException { - IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(new TermQuery(new Term("id", Integer.toString(id))), 1); - assertEquals(1, topDocs.totalHits); - Document document = reader.document(topDocs.scoreDocs[0].doc); - out.writeString(document.get("value")); - } - }); + private TestEntity(ShardRequestCache shardRequestCache, AtomicBoolean standInForIndexShard) { this.standInForIndexShard = standInForIndexShard; this.shardRequestCache = shardRequestCache; } @@ -335,5 +369,10 @@ public class IndicesRequestCacheTests extends ESTestCase { public Object getCacheIdentity() { return standInForIndexShard; } + + @Override + public long ramBytesUsed() { + return 42; + } } } diff --git a/core/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java b/core/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java new file mode 100644 index 00000000000..f712a5ba843 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/indices/NodeIndicesStatsTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.object.HasToString.hasToString; + +public class NodeIndicesStatsTests extends ESTestCase { + + public void testInvalidLevel() { + final NodeIndicesStats stats = new NodeIndicesStats(); + final String level = randomAsciiOfLength(16); + final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContent(null, params)); + assertThat( + e, + hasToString(containsString("level parameter must be one of [indices] or [node] or [shards] but was [" + level + "]"))); + } + +} diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 02cc8ff30a7..6f379e48bae 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -74,9 +74,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -/** - * - */ public class AnalysisModuleTests extends ModuleTestCase { public IndexAnalyzers getIndexAnalyzers(Settings settings) throws IOException { @@ -193,7 +190,7 @@ public class AnalysisModuleTests extends ModuleTestCase { .put("index.analysis.analyzer.foobar.type", "standard") .put("index.analysis.analyzer.foobar.alias","foobaz") // analyzer aliases were removed in v5.0.0 alpha6 - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha6, null)) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_beta1, null)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); AnalysisRegistry registry = getNewRegistry(settings); diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index cd2c34e5102..b79c7a31392 100644 --- a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -37,9 +37,6 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -/** - * - */ public class AnalyzeActionIT extends ESIntegTestCase { public void testSimpleAnalyzerTests() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 844228a512d..97b3924303d 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.util.Callback; import org.elasticsearch.index.Index; import 
org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; @@ -59,7 +58,9 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; /** * Abstract base class for tests against {@link IndicesClusterStateService} @@ -90,7 +91,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId()); if (localRoutingNode != null) { if (enableRandomFailures == false) { - assertTrue("failed shard cache should be empty", failedShardsCache.isEmpty()); + assertThat("failed shard cache should be empty", failedShardsCache.values(), empty()); } // check that all shards in local routing nodes have been allocated for (ShardRouting shardRouting : localRoutingNode) { @@ -176,9 +177,10 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC private volatile Map<String, MockIndexService> indices = emptyMap(); @Override - public synchronized MockIndexService createIndex(NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, - List<IndexEventListener> buildInIndexListener, - Consumer<ShardId> globalCheckPointSyncer) throws IOException { + public synchronized MockIndexService createIndex( + IndexMetaData indexMetaData, + List<IndexEventListener> buildInIndexListener, + Consumer<ShardId> globalCheckPointSyncer) throws IOException { MockIndexService indexService = new MockIndexService(new IndexSettings(indexMetaData, Settings.EMPTY)); indices = newMapBuilder(indices).put(indexMetaData.getIndexUUID(), indexService).immutableMap(); return indexService; @@ -225,8 +227,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC PeerRecoveryTargetService recoveryTargetService, PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, - NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) - throws IOException { + Callback<IndexShard.ShardFailure> onShardFailure) throws IOException { failRandomly(); MockIndexService indexService = indexService(recoveryState.getShardId().getIndex()); MockIndexShard indexShard = indexService.createShard(shardRouting); @@ -276,6 +277,9 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC @Override public void updateMetaData(IndexMetaData indexMetaData) { indexSettings.updateIndexMetaData(indexMetaData); + for (MockIndexShard shard: shards.values()) { + shard.updateTerm(indexMetaData.primaryTerm(shard.shardId().id())); + } } @Override @@ -285,7 +289,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC public synchronized MockIndexShard createShard(ShardRouting routing) throws IOException { failRandomly(); - MockIndexShard shard = new MockIndexShard(routing); + MockIndexShard shard = new MockIndexShard(routing, indexSettings.getIndexMetaData().primaryTerm(routing.shardId().id())); shards = newMapBuilder(shards).put(routing.id(), shard).immutableMap(); return shard; } @@ -320,9 +324,11 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC 
private volatile RecoveryState recoveryState; private volatile Set<String> activeAllocationIds; private volatile Set<String> initializingAllocationIds; + private volatile long term; - public MockIndexShard(ShardRouting shardRouting) { + public MockIndexShard(ShardRouting shardRouting, long term) { this.shardRouting = shardRouting; + this.term = term; } @Override @@ -348,8 +354,12 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC @Override public void updateRoutingEntry(ShardRouting shardRouting) throws IOException { failRandomly(); - assert this.shardId().equals(shardRouting.shardId()); - assert this.shardRouting.isSameAllocation(shardRouting); + assertThat(this.shardId(), equalTo(shardRouting.shardId())); + assertTrue("current: " + this.shardRouting + ", got: " + shardRouting, this.shardRouting.isSameAllocation(shardRouting)); + if (this.shardRouting.active()) { + assertTrue("an active shard must stay active, current: " + this.shardRouting + ", got: " + shardRouting, + shardRouting.active()); + } this.shardRouting = shardRouting; } @@ -358,5 +368,13 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC this.activeAllocationIds = activeAllocationIds; this.initializingAllocationIds = initializingAllocationIds; } + + public void updateTerm(long newTerm) { + assertThat("term can only be incremented: " + shardRouting, newTerm, greaterThanOrEqualTo(term)); + if (shardRouting.primary() && shardRouting.active()) { + assertThat("term can not be changed on an active primary shard: " + shardRouting, newTerm, equalTo(term)); + } + this.term = newTerm; + } } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index a5a7c63945f..fca09e74332 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -67,7 +67,6 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.ShardId; @@ -138,10 +137,10 @@ public class ClusterStateChanges extends AbstractComponent { try { @SuppressWarnings("unchecked") final List<IndexEventListener> listeners = anyList(); @SuppressWarnings("unchecked") final Consumer<ShardId> globalCheckpointSyncer = any(Consumer.class); - when(indicesService.createIndex(any(NodeServicesProvider.class), any(IndexMetaData.class), listeners, globalCheckpointSyncer)) + when(indicesService.createIndex(any(IndexMetaData.class), listeners, globalCheckpointSyncer)) .then(invocationOnMock -> { IndexService indexService = mock(IndexService.class); - IndexMetaData indexMetaData = (IndexMetaData)invocationOnMock.getArguments()[1]; + IndexMetaData indexMetaData = (IndexMetaData)invocationOnMock.getArguments()[0]; when(indexService.index()).thenReturn(indexMetaData.getIndex()); MapperService mapperService = mock(MapperService.class); when(indexService.mapperService()).thenReturn(mapperService); @@ -155,7 +154,7 @@ public class ClusterStateChanges extends AbstractComponent { // services TransportService transportService = new TransportService(settings, transport, threadPool, - 
TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, clusterSettings); MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, null, null) { // metaData upgrader should do nothing @Override @@ -163,15 +162,14 @@ public class ClusterStateChanges extends AbstractComponent { return indexMetaData; } }; - NodeServicesProvider nodeServicesProvider = new NodeServicesProvider(threadPool, null, null, null, null, null, clusterService); MetaDataIndexStateService indexStateService = new MetaDataIndexStateService(settings, clusterService, allocationService, - metaDataIndexUpgradeService, nodeServicesProvider, indicesService); + metaDataIndexUpgradeService, indicesService); MetaDataDeleteIndexService deleteIndexService = new MetaDataDeleteIndexService(settings, clusterService, allocationService); MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(settings, clusterService, - allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, nodeServicesProvider); + allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService); MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService, allocationService, new AliasValidator(settings), environment, - nodeServicesProvider, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool); + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool); transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool, indexStateService, clusterSettings, actionFilters, indexNameExpressionResolver, destructiveOperations); diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index f2fe3d227a7..86e2a35f1b5 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.cluster; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -42,7 +43,6 @@ import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; @@ -62,6 +62,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -79,7 +80,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice // we have an IndicesClusterStateService per node in the cluster final Map clusterStateServiceMap = new HashMap<>(); ClusterState state = 
randomInitialClusterState(clusterStateServiceMap, MockIndicesService::new); - // each of the following iterations represents a new cluster state update processed on all nodes for (int i = 0; i < 30; i++) { logger.info("Iteration {}", i); @@ -87,7 +87,14 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice // calculate new cluster state for (int j = 0; j < randomInt(3); j++) { // multiple iterations to simulate batching of cluster states - state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); + try { + state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); + } catch (AssertionError error) { + ClusterState finalState = state; + logger.error((org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("failed to randomly change state. Last good state:\n{}", finalState), error); + throw error; + } } // apply cluster state to nodes (incl. master) @@ -95,7 +102,15 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node); ClusterState localState = adaptClusterStateToLocalNode(state, node); ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node); - indicesClusterStateService.clusterChanged(new ClusterChangedEvent("simulated change " + i, localState, previousLocalState)); + final ClusterChangedEvent event = new ClusterChangedEvent("simulated change " + i, localState, previousLocalState); + try { + indicesClusterStateService.clusterChanged(event); + } catch (AssertionError error) { + logger.error((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}", + node, event.previousState(), event.state()), error); + throw error; + } // check that cluster state has been properly applied to node assertClusterStateMatchesNodeState(localState, indicesClusterStateService); @@ -103,7 +118,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } // TODO: check if we can go to green by starting all shards and finishing all iterations - logger.info("Final cluster state: {}", state.prettyPrint()); + logger.info("Final cluster state: {}", state); } /** @@ -138,7 +153,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice // simulate the cluster state change on the node ClusterState localState = adaptClusterStateToLocalNode(stateWithIndex, node); ClusterState previousLocalState = adaptClusterStateToLocalNode(initialState, node); - IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(RecordingIndicesService::new); + IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new); indicesCSSvc.start(); indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState)); @@ -184,7 +199,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice Supplier indicesServiceSupplier) { for (DiscoveryNode node : state.nodes()) { clusterStateServiceMap.computeIfAbsent(node, discoveryNode -> { - IndicesClusterStateService ics = createIndicesClusterStateService(indicesServiceSupplier); + IndicesClusterStateService ics = createIndicesClusterStateService(discoveryNode, indicesServiceSupplier); ics.start(); return ics; }); @@ -314,6 +329,13 @@ public class
IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice state = cluster.deassociateDeadNodes(state, true, "removed and added a node"); updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } + if (randomBoolean()) { + // and add it back + DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(discoveryNode).build(); + state = ClusterState.builder(state).nodes(newNodes).build(); + state = cluster.reroute(state, new ClusterRerouteRequest()); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); + } } } } @@ -323,12 +345,15 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice return state; } - private DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { + private static final AtomicInteger nodeIdGenerator = new AtomicInteger(); + + protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { Set roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values()))); for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) { roles.add(mustHaveRole); } - return new DiscoveryNode("node_" + randomAsciiOfLength(8), LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles, + final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); + return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, Version.CURRENT); } @@ -336,21 +361,23 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); } - private IndicesClusterStateService createIndicesClusterStateService(final Supplier indicesServiceSupplier) { + private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNode discoveryNode, + final Supplier indicesServiceSupplier) { final ThreadPool threadPool = mock(ThreadPool.class); final Executor executor = mock(Executor.class); when(threadPool.generic()).thenReturn(executor); final MockIndicesService indicesService = indicesServiceSupplier.get(); - final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + final Settings settings = Settings.builder().put("node.name", discoveryNode.getName()).build(); + final TransportService transportService = new TransportService(settings, null, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); final ClusterService clusterService = mock(ClusterService.class); - final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService, + final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, transportService, null); - final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(Settings.EMPTY, threadPool, + final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool, transportService, null, clusterService); final ShardStateAction shardStateAction = mock(ShardStateAction.class); return new IndicesClusterStateService( - Settings.EMPTY, + settings, indicesService, clusterService, threadPool, diff --git a/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java b/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java index 6052add7e36..1f56e4cfc57 100644 --- 
a/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java @@ -19,7 +19,8 @@ package org.elasticsearch.indices.exists.indices; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.test.ESIntegTestCase; @@ -29,21 +30,33 @@ import java.util.Arrays; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; public class IndicesExistsIT extends ESIntegTestCase { // Indices exists never throws IndexMissingException, the indices options control its behaviour (return true or false) public void testIndicesExists() throws Exception { - assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false)); - assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false)); - assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); + assertFalse(client().admin().indices().prepareExists("foo").get().isExists()); + assertFalse(client().admin().indices().prepareExists("foo*").get().isExists()); + assertFalse(client().admin().indices().prepareExists("_all").get().isExists()); createIndex("foo", "foobar", "bar", "barbaz"); + IndicesExistsRequestBuilder indicesExistsRequestBuilder = client().admin().indices().prepareExists("foo*") + .setExpandWildcardsOpen(false); + IndicesExistsRequest request = indicesExistsRequestBuilder.request(); + // check that ignore unavailable and allow no indices are set to false. That is their only valid value, as it cannot be overridden + assertFalse(request.indicesOptions().ignoreUnavailable()); + assertFalse(request.indicesOptions().allowNoIndices()); + assertThat(indicesExistsRequestBuilder.get().isExists(), equalTo(false)); + + assertAcked(client().admin().indices().prepareClose("foobar").get()); + assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("foo*").setExpandWildcardsOpen(false) + .setExpandWildcardsClosed(false).get().isExists(), equalTo(false)); assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("foob*").setExpandWildcardsClosed(false).get().isExists(), equalTo(false)); assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true)); assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true)); assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true)); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index ea2a80bada5..c35df81cfd6 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -38,8 +38,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; -/** - */ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testModificationPreventsFlushing() throws InterruptedException { diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java index 8d319ae5b8d..6b96e4e102d 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java @@ -39,9 +39,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBloc import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ @ClusterScope(randomDynamicTemplates = false) public class SimpleGetMappingsIT extends ESIntegTestCase { public void testGetMappingsWhereThereAreNone() { diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index bf755557a11..ed371eca895 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -55,9 +55,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING; import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import
static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; @@ -67,6 +69,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; /** @@ -80,20 +83,15 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // clear all caches, we could be very close (or even above) the limit and then we will not be able to reset the breaker settings client().admin().indices().prepareClearCache().setFieldDataCache(true).setQueryCache(true).setRequestCache(true).get(); - Settings resetSettings = Settings.builder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null)) - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), - HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getDefaultRaw(null)) - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), - HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null)) - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) - .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), - HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null)) - .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) - .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), - HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getDefaultRaw(null)) - .build(); + Settings.Builder resetSettings = Settings.builder(); + Stream.of( + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING).forEach(s -> resetSettings.putNull(s.getKey())); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); } @@ -409,6 +407,26 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { assertThat(breaks, greaterThanOrEqualTo(1)); } + public void testCanResetUnreasonableSettings() { + if (noopBreakerUsed()) { + logger.info("--> noop breakers used, skipping test"); + return; + } + Settings insane = Settings.builder() + .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "5b") + .build(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(insane).get(); + + // calls update settings to reset everything to default, checking that the request + // is not blocked by the above in-flight circuit breaker + reset(); + + assertThat(client().admin().cluster().prepareState().get() + .getState().metaData().transientSettings().get(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()), + nullValue()); + + } + public void testLimitsRequestSize() throws Exception { ByteSizeValue
inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB); if (noopBreakerUsed()) { @@ -452,7 +470,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { } Settings limitSettings = Settings.builder() - .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), inFlightRequestsLimit) + .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), inFlightRequestsLimit) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(limitSettings)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 01eb2a6e524..dfe9a09fb2a 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState.Stage; +import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotState; @@ -73,6 +74,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; @@ -83,9 +85,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; -/** - * - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexRecoveryIT extends ESIntegTestCase { @@ -101,7 +100,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, MockFSIndexStore.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, MockFSIndexStore.TestPlugin.class, + RecoverySettingsChunkSizePlugin.class); } private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, RecoverySource recoverySource, boolean primary, @@ -137,26 +137,21 @@ public class IndexRecoveryIT extends ESIntegTestCase { private void slowDownRecovery(ByteSizeValue shardSize) { long chunkSize = Math.max(1, shardSize.getBytes() / 10); - for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) { - setChunkSize(settings, new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)); - } assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() // one chunk per sec.. 
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) - ) - .get().isAcknowledged()); + // small chunks + .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)) + ).get().isAcknowledged()); } private void restoreRecoverySpeed() { - for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) { - setChunkSize(settings, RecoverySettings.DEFAULT_CHUNK_SIZE); - } assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb") - ) - .get().isAcknowledged()); + .put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE) + ).get().isAcknowledged()); } public void testGatewayRecovery() throws Exception { @@ -659,8 +654,4 @@ public class IndexRecoveryIT extends ESIntegTestCase { transport.sendRequest(node, requestId, action, request, options); } } - - public static void setChunkSize(RecoverySettings recoverySettings, ByteSizeValue chunksSize) { - recoverySettings.setChunkSize(chunksSize); - } } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java new file mode 100644 index 00000000000..54e08209876 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.TimeUnit; + +public class RecoverySettingsDynamicUpdateTests extends ESTestCase { + private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, clusterSettings); + + public void testZeroBytesPerSecondIsNoRateLimit() { + clusterSettings.applySettings(Settings.builder().put( + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), 0).build()); + assertEquals(null, recoverySettings.rateLimiter()); + } + + public void testRetryDelayStateSync() { + long duration = between(1, 1000); + TimeUnit timeUnit = randomFrom(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS); + clusterSettings.applySettings(Settings.builder().put( + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), duration, timeUnit + ).build()); + assertEquals(new TimeValue(duration, timeUnit), recoverySettings.retryDelayStateSync()); + } + + public void testRetryDelayNetwork() { + long duration = between(1, 1000); + TimeUnit timeUnit = randomFrom(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS); + clusterSettings.applySettings(Settings.builder().put( + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), duration, timeUnit + ).build()); + assertEquals(new TimeValue(duration, timeUnit), recoverySettings.retryDelayNetwork()); + } + + public void testActivityTimeout() { + long duration = between(1, 1000); + TimeUnit timeUnit = randomFrom(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS); + clusterSettings.applySettings(Settings.builder().put( + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), duration, timeUnit + ).build()); + assertEquals(new TimeValue(duration, timeUnit), recoverySettings.activityTimeout()); + } + + public void testInternalActionTimeout() { + long duration = between(1, 1000); + TimeUnit timeUnit = randomFrom(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS); + clusterSettings.applySettings(Settings.builder().put( + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), duration, timeUnit + ).build()); + assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionTimeout()); + } + + public void testInternalLongActionTimeout() { + long duration = between(1, 1000); + TimeUnit timeUnit = randomFrom(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS); + clusterSettings.applySettings(Settings.builder().put( + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), duration, timeUnit + ).build()); + assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout()); + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index c839d56508d..1dce300ef0c 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ 
b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRelocatedException; @@ -81,8 +80,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), null, randomBoolean(), randomLong()); Store store = newStore(createTempDir()); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, () -> 0L, e -> () -> {}, @@ -133,8 +132,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), null, randomBoolean(), randomLong()); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); @@ -197,8 +196,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), null, randomBoolean(), randomLong()); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); @@ -256,8 +255,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Completed() throws IOException { final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", 
LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), null, false, randomLong()); IndexShard shard = mock(IndexShard.class); Translog.View translogView = mock(Translog.View.class); @@ -286,8 +285,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { public void testWaitForClusterStateOnPrimaryRelocation() throws IOException, InterruptedException { final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), null, true, randomLong()); AtomicBoolean phase1Called = new AtomicBoolean(); AtomicBoolean phase2Called = new AtomicBoolean(); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index 55359a935f6..2c00c59c343 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -22,7 +22,6 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.IndexOutput; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -42,7 +41,7 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase { IndexService service = createIndex("foo"); IndexShard indexShard = service.getShardOrNull(0); - DiscoveryNode node = new DiscoveryNode("foo", new LocalTransportAddress("bar"), emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); RecoveryTarget status = new RecoveryTarget(indexShard, node, new PeerRecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state) { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java index 6d37ae7d0d7..1245725e055 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState.File; import org.elasticsearch.indices.recovery.RecoveryState.Index; @@ -342,7 +341,7 @@ public class RecoveryTargetTests extends 
ESTestCase { } public void testStageSequenceEnforcement() { - final DiscoveryNode discoveryNode = new DiscoveryNode("1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), + final DiscoveryNode discoveryNode = new DiscoveryNode("1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Stage[] stages = Stage.values(); int i = randomIntBetween(0, stages.length - 1); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index 7065ffa5dfc..691b043e86a 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.test.ESTestCase; @@ -36,15 +35,13 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.Matchers.equalTo; -/** - */ public class StartRecoveryRequestTests extends ESTestCase { public void testSerialization() throws Exception { Version targetNodeVersion = randomVersion(random()); StartRecoveryRequest outRequest = new StartRecoveryRequest( new ShardId("test", "_na_", 0), - new DiscoveryNode("a", new LocalTransportAddress("1"), emptyMap(), emptySet(), targetNodeVersion), - new DiscoveryNode("b", new LocalTransportAddress("1"), emptyMap(), emptySet(), targetNodeVersion), + new DiscoveryNode("a", buildNewFakeTransportAddress(), emptyMap(), emptySet(), targetNodeVersion), + new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), targetNodeVersion), Store.MetadataSnapshot.EMPTY, randomBoolean(), 1L diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index cd562e0fde6..e48d5fe2628 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -379,6 +379,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { public void testUpdateAutoThrottleSettings() throws IllegalAccessException { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); + mockAppender.start(); Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); Loggers.addAppender(rootLogger, mockAppender); @@ -414,6 +415,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { assertThat(getSettingsResponse.getSetting("test", MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey()), equalTo("false")); } finally { Loggers.removeAppender(rootLogger, mockAppender); + mockAppender.stop(); Loggers.setLevel(rootLogger, savedLevel); } } @@ -468,6 +470,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { // #6882: make sure we can change index.merge.scheduler.max_thread_count live public void testUpdateMergeMaxThreadCount() throws IllegalAccessException { MockAppender mockAppender = new MockAppender("testUpdateMergeMaxThreadCount"); + 
mockAppender.start(); Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); Loggers.addAppender(rootLogger, mockAppender); @@ -505,6 +508,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { } finally { Loggers.removeAppender(rootLogger, mockAppender); + mockAppender.stop(); Loggers.setLevel(rootLogger, savedLevel); } } diff --git a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 8ec629dbbdc..54bdfd05008 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -18,99 +18,48 @@ */ package org.elasticsearch.indices.state; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.junit.After; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -@ClusterScope(scope=Scope.TEST, numDataNodes=2) public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { - // Combined multiple tests into one, because cluster scope is test. - // The cluster scope is test b/c we can't clear cluster settings. 
- public void testCloseAllRequiresName() { - Settings clusterSettings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + + @After + public void afterTest() { + Settings settings = Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), (String)null) .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings)); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + public void testCloseAllRequiresName() { createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); - assertThat(healthResponse.isTimedOut(), equalTo(false)); - // Close all explicitly - try { - client().admin().indices().prepareClose("_all").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("*").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("test*").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("*", "-test1").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet(); - assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); + assertAcked(client().admin().indices().prepareClose("test3", "test2")); assertIndexIsClosed("test2", "test3"); // disable closing - Client client = client(); createIndex("test_no_close"); - healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); - assertThat(healthResponse.isTimedOut(), equalTo(false)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false)).get(); + Settings settings = Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - try { - client.admin().indices().prepareClose("test_no_close").execute().actionGet(); - fail("exception expected"); - } catch (IllegalStateException ex) { - assertEquals(ex.getMessage(), "closing indices is disabled - set [cluster.indices.close.enable: true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); - } + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> client().admin().indices().prepareClose("test_no_close").get()); + assertEquals(illegalStateException.getMessage(), + "closing indices is disabled - set [cluster.indices.close.enable: true] to enable it. NOTE: closed indices still " + + "consume a significant amount of diskspace"); } private void assertIndexIsClosed(String... indices) { - checkIndexState(IndexMetaData.State.CLOSE, indices); - } - - private void checkIndexState(IndexMetaData.State state, String... 
indices) { ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet(); for (String index : indices) { IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index); - assertThat(indexMetaData, notNullValue()); - assertThat(indexMetaData.getState(), equalTo(state)); + assertNotNull(indexMetaData); + assertEquals(IndexMetaData.State.CLOSE, indexMetaData.getState()); } } } diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 4f97264af9f..6f9668c368e 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -70,22 +70,16 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testSimpleCloseMissingIndex() { Client client = client(); - try { - client.admin().indices().prepareClose("test1").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareClose("test1").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testSimpleOpenMissingIndex() { Client client = client(); - try { - client.admin().indices().prepareOpen("test1").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareOpen("test1").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testCloseOneMissingIndex() { @@ -93,12 +87,9 @@ public class OpenCloseIndexIT extends ESIntegTestCase { createIndex("test1"); ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - try { - client.admin().indices().prepareClose("test1", "test2").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareClose("test1", "test2").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testCloseOneMissingIndexIgnoreMissing() { @@ -117,12 +108,9 @@ public class OpenCloseIndexIT extends ESIntegTestCase { createIndex("test1"); ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - try { - client.admin().indices().prepareOpen("test1", "test2").execute().actionGet(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index")); - } + Exception e = expectThrows(IndexNotFoundException.class, () -> + client.admin().indices().prepareOpen("test1", "test2").execute().actionGet()); + assertThat(e.getMessage(), is("no such index")); } public void testOpenOneMissingIndexIgnoreMissing() { @@ -204,42 +192,30 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testCloseNoIndex() { Client client = client(); - try { - 
client.admin().indices().prepareClose().execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareClose().execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testCloseNullIndex() { Client client = client(); - try { - client.admin().indices().prepareClose((String[])null).execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareClose((String[])null).execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNoIndex() { Client client = client(); - try { - client.admin().indices().prepareOpen().execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareOpen().execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNullIndex() { Client client = client(); - try { - client.admin().indices().prepareOpen((String[])null).execute().actionGet(); - fail("Expected ActionRequestValidationException"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), containsString("index is missing")); - } + Exception e = expectThrows(ActionRequestValidationException.class, () -> + client.admin().indices().prepareOpen((String[])null).execute().actionGet()); + assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenAlreadyOpenedIndex() { diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 2ca8947cbf8..8582ca0e02f 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -41,9 +41,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.Index; @@ -73,12 +71,15 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -/** - */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0) -@ESIntegTestCase.SuppressLocalMode @TestLogging("_root:DEBUG") public class RareClusterStateIT extends ESIntegTestCase { + + @Override + protected boolean addMockZenPings() { + return false; + } + @Override protected int 
numberOfShards() { return 1; @@ -126,7 +127,7 @@ public class RareClusterStateIT extends ESIntegTestCase { // inject a node ClusterState.Builder builder = ClusterState.builder(currentState); builder.nodes(DiscoveryNodes.builder(currentState.nodes()).add(new DiscoveryNode("_non_existent", - LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT))); + buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT))); // open index final IndexMetaData indexMetaData = IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build(); @@ -170,9 +171,7 @@ public class RareClusterStateIT extends ESIntegTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932") public void testDeleteCreateInOneBulk() throws Exception { - internalCluster().startNodesAsync(2, Settings.builder() - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen") - .build()).get(); + internalCluster().startNodesAsync(2).get(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get(); ensureGreen("test"); diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index 0515887a550..5ec6d82117a 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -38,9 +38,6 @@ import org.elasticsearch.test.ESIntegTestCase; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class SimpleIndexStateIT extends ESIntegTestCase { private final Logger logger = Loggers.getLogger(SimpleIndexStateIT.class); diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index b95d872a61e..a3f22817dad 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -489,11 +489,11 @@ public class IndexStatsIT extends ESIntegTestCase { } catch (VersionConflictEngineException e) {} stats = client().admin().indices().prepareStats().setTypes("type1", "type2").execute().actionGet(); - assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); - assertThat(stats.getIndex("test2").getTotal().getIndexing().getTotal().getIndexFailedCount(), equalTo(1L)); + assertThat(stats.getIndex("test1").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); + assertThat(stats.getIndex("test2").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(1L)); assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexFailedCount(), equalTo(1L)); assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2").getIndexFailedCount(), equalTo(1L)); - assertThat(stats.getTotal().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); + assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); } public void testMergeStats() { diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java 
b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index b248fc811f6..a6420034c42 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -74,9 +74,6 @@ import static java.lang.Thread.sleep; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -/** - * - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndicesStoreIntegrationIT extends ESIntegTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index 62b5bc30a68..74370b711d6 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -54,8 +53,6 @@ import static org.elasticsearch.Version.CURRENT; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.VersionUtils.randomVersion; -/** - */ public class IndicesStoreTests extends ESTestCase { private static final ShardRoutingState[] NOT_STARTED_STATES; @@ -86,9 +83,11 @@ public class IndicesStoreTests extends ESTestCase { @Before public void before() { - localNode = new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), emptySet(), Version.CURRENT); + localNode = new DiscoveryNode("abc", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); clusterService = createClusterService(threadPool); - indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(clusterService.getSettings(), null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR), null); + TransportService transportService = new TransportService(clusterService.getSettings(), null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); + indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, transportService, null); } @After @@ -142,7 +141,7 @@ public class IndicesStoreTests extends ESTestCase { ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", - new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT))); + buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); int localShardId = randomInt(numShards - 1); for (int i = 0; i < numShards; i++) { @@ -187,7 +186,7 @@ public class IndicesStoreTests extends ESTestCase { ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); 
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", - new LocalTransportAddress("xyz"), emptyMap(), emptySet(), nodeVersion))); + buildNewFakeTransportAddress(), emptyMap(), emptySet(), nodeVersion))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", null, true, ShardRoutingState.STARTED)); @@ -210,8 +209,8 @@ public class IndicesStoreTests extends ESTestCase { clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()) .add(localNode) - .add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)) - .add(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test + .add(new DiscoveryNode("xyz", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)) + .add(new DiscoveryNode("def", buildNewFakeTransportAddress(), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test )); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", null, true, ShardRoutingState.STARTED)); diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index e40361e94f2..b1a4d19894e 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -63,9 +63,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class SimpleIndexTemplateIT extends ESIntegTestCase { @After diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java b/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java index 2b78cd83fb6..2c3730dc422 100644 --- a/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/core/src/test/java/org/elasticsearch/ingest/IngestClientIT.java @@ -43,7 +43,6 @@ import org.elasticsearch.test.ESIntegTestCase; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -229,9 +228,4 @@ public class IngestClientIT extends ESIntegTestCase { assertThat(ex.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); } } - - @Override - protected Collection<Class<? extends Plugin>> getMockPlugins() { - return Collections.singletonList(TestSeedPlugin.class); - } } diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index c91e1e5f6ec..02fe0d03c77 100644 --- a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -60,11 +60,6 @@ public class
IngestProcessorNotInstalledOnAllNodesIT extends ESIntegTestCase { return installPlugin ? Arrays.asList(IngestTestPlugin.class) : Collections.emptyList(); } - @Override - protected Collection<Class<? extends Plugin>> getMockPlugins() { - return Collections.singletonList(TestSeedPlugin.class); - } - public void testFailPipelineCreation() throws Exception { installPlugin = true; String node1 = internalCluster().startNode(); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index acf5c26e565..b9426b83e66 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -317,7 +317,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { int numRequest = scaledRandomIntBetween(8, 64); int numIndexRequests = 0; for (int i = 0; i < numRequest; i++) { - ActionRequest request; + DocWriteRequest request; if (randomBoolean()) { if (randomBoolean()) { request = new DeleteRequest("_index", "_type", "_id"); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java index cdbe1e11570..4e90f5346d2 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -264,9 +263,9 @@ public class PipelineStoreTests extends ESTestCase { PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray( "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}},{\"remove\" : {\"field\": \"_field\"}}]}")); - DiscoveryNode node1 = new DiscoveryNode("_node_id1", new LocalTransportAddress("_id"), + DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNode node2 = new DiscoveryNode("_node_id2", new LocalTransportAddress("_id"), + DiscoveryNode node2 = new DiscoveryNode("_node_id2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>(); ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); @@ -293,7 +292,7 @@ public class PipelineStoreTests extends ESTestCase { assertThat(e.getMessage(), equalTo("Ingest info is empty")); } - DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), + DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set")));
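// Editorial note, not part of the original change: PipelineStore.validatePipeline matches every processor type referenced by the pipeline in putRequest against the ProcessorInfo entries that each node reports through its IngestInfo; this is a hedged reading of the hunk, since the assertions surrounding the call below are not visible in this excerpt.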
store.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); diff --git a/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java b/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java index e022bd75213..5c499eae4d8 100644 --- a/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java +++ b/core/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java @@ -36,12 +36,14 @@ import java.util.Map; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; public class SimpleMgetIT extends ESIntegTestCase { + public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { createIndex("test"); @@ -51,7 +53,7 @@ public class SimpleMgetIT extends ESIntegTestCase { MultiGetResponse mgetResponse = client().prepareMultiGet() .add(new MultiGetRequest.Item("test", "test", "1")) .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1")) - .execute().actionGet(); + .get(); assertThat(mgetResponse.getResponses().length, is(2)); assertThat(mgetResponse.getResponses()[0].getIndex(), is("test")); @@ -60,19 +62,47 @@ public class SimpleMgetIT extends ESIntegTestCase { assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[1].isFailed(), is(true)); assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("no such index")); - assertThat(((ElasticsearchException) mgetResponse.getResponses()[1].getFailure().getFailure()).getIndex().getName(), is("nonExistingIndex")); - + assertThat(((ElasticsearchException) mgetResponse.getResponses()[1].getFailure().getFailure()).getIndex().getName(), + is("nonExistingIndex")); mgetResponse = client().prepareMultiGet() .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1")) - .execute().actionGet(); + .get(); assertThat(mgetResponse.getResponses().length, is(1)); assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex")); assertThat(mgetResponse.getResponses()[0].isFailed(), is(true)); assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("no such index")); - assertThat(((ElasticsearchException) mgetResponse.getResponses()[0].getFailure().getFailure()).getIndex().getName(), is("nonExistingIndex")); + assertThat(((ElasticsearchException) mgetResponse.getResponses()[0].getFailure().getFailure()).getIndex().getName(), + is("nonExistingIndex")); + } + public void testThatMgetShouldWorkWithMultiIndexAlias() throws IOException { + assertAcked(prepareCreate("test").addAlias(new Alias("multiIndexAlias"))); + assertAcked(prepareCreate("test2").addAlias(new Alias("multiIndexAlias"))); + client().prepareIndex("test", "test", "1").setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) + .setRefreshPolicy(IMMEDIATE).get(); + + MultiGetResponse mgetResponse = client().prepareMultiGet() + .add(new MultiGetRequest.Item("test", "test", "1")) + .add(new MultiGetRequest.Item("multiIndexAlias", "test", "1")) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + + assertThat(mgetResponse.getResponses()[0].getIndex(), is("test")); + assertThat(mgetResponse.getResponses()[0].isFailed(), 
is(false)); + + assertThat(mgetResponse.getResponses()[1].getIndex(), is("multiIndexAlias")); + assertThat(mgetResponse.getResponses()[1].isFailed(), is(true)); + assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), containsString("more than one indices")); + + mgetResponse = client().prepareMultiGet() + .add(new MultiGetRequest.Item("multiIndexAlias", "test", "1")) + .get(); + assertThat(mgetResponse.getResponses().length, is(1)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is("multiIndexAlias")); + assertThat(mgetResponse.getResponses()[0].isFailed(), is(true)); + assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), containsString("more than one indices")); } public void testThatParentPerDocumentIsSupported() throws Exception { @@ -93,7 +123,7 @@ public class SimpleMgetIT extends ESIntegTestCase { MultiGetResponse mgetResponse = client().prepareMultiGet() .add(new MultiGetRequest.Item(indexOrAlias(), "test", "1").parent("4")) .add(new MultiGetRequest.Item(indexOrAlias(), "test", "1")) - .execute().actionGet(); + .get(); assertThat(mgetResponse.getResponses().length, is(2)); assertThat(mgetResponse.getResponses()[0].isFailed(), is(false)); @@ -119,9 +149,11 @@ public class SimpleMgetIT extends ESIntegTestCase { MultiGetRequestBuilder request = client().prepareMultiGet(); for (int i = 0; i < 100; i++) { if (i % 2 == 0) { - request.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext("included", "*.hidden_field"))); + request.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)) + .fetchSourceContext(new FetchSourceContext(true, new String[] {"included"}, new String[] {"*.hidden_field"}))); } else { - request.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext(false))); + request.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)) + .fetchSourceContext(new FetchSourceContext(false))); } } @@ -159,7 +191,7 @@ public class SimpleMgetIT extends ESIntegTestCase { MultiGetResponse mgetResponse = client().prepareMultiGet() .add(new MultiGetRequest.Item(indexOrAlias(), "test", id).routing(routingOtherShard)) .add(new MultiGetRequest.Item(indexOrAlias(), "test", id)) - .execute().actionGet(); + .get(); assertThat(mgetResponse.getResponses().length, is(2)); assertThat(mgetResponse.getResponses()[0].isFailed(), is(false)); diff --git a/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java index da9169dcdbb..9bd4ab3b90f 100644 --- a/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java @@ -22,6 +22,9 @@ package org.elasticsearch.monitor.os; import org.apache.lucene.util.Constants; import org.elasticsearch.test.ESTestCase; +import java.util.Arrays; +import java.util.List; + import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.both; @@ -30,8 +33,10 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; public class OsProbeTests extends ESTestCase { + private final OsProbe probe = OsProbe.getInstance(); public void testOsInfo() { @@ -66,12 +71,6 @@ public class 
OsProbeTests extends ESTestCase { assertThat(loadAverage[0], greaterThanOrEqualTo((double) 0)); assertThat(loadAverage[1], greaterThanOrEqualTo((double) 0)); assertThat(loadAverage[2], greaterThanOrEqualTo((double) 0)); - } else if (Constants.FREE_BSD) { - // five- and fifteen-minute load averages not available if linprocfs is not mounted at /compat/linux/proc - assertNotNull(loadAverage); - assertThat(loadAverage[0], greaterThanOrEqualTo((double) 0)); - assertThat(loadAverage[1], anyOf(equalTo((double) -1), greaterThanOrEqualTo((double) 0))); - assertThat(loadAverage[2], anyOf(equalTo((double) -1), greaterThanOrEqualTo((double) 0))); } else if (Constants.MAC_OS_X) { // one minute load average is available, but 10-minute and 15-minute load averages are not assertNotNull(loadAverage); @@ -108,5 +107,115 @@ public class OsProbeTests extends ESTestCase { assertThat(stats.getSwap().getFree().getBytes(), equalTo(0L)); assertThat(stats.getSwap().getUsed().getBytes(), equalTo(0L)); } + + if (Constants.LINUX) { + if (stats.getCgroup() != null) { + assertThat(stats.getCgroup().getCpuAcctControlGroup(), notNullValue()); + assertThat(stats.getCgroup().getCpuAcctUsageNanos(), greaterThan(0L)); + assertThat(stats.getCgroup().getCpuCfsQuotaMicros(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L))); + assertThat(stats.getCgroup().getCpuCfsPeriodMicros(), greaterThanOrEqualTo(0L)); + assertThat(stats.getCgroup().getCpuStat().getNumberOfElapsedPeriods(), greaterThanOrEqualTo(0L)); + assertThat(stats.getCgroup().getCpuStat().getNumberOfTimesThrottled(), greaterThanOrEqualTo(0L)); + assertThat(stats.getCgroup().getCpuStat().getTimeThrottledNanos(), greaterThanOrEqualTo(0L)); + } + } else { + assertNull(stats.getCgroup()); + } } + + public void testGetSystemLoadAverage() { + assumeTrue("test runs on Linux only", Constants.LINUX); + + final OsProbe probe = new OsProbe() { + @Override + String readProcLoadavg() { + return "1.51 1.69 1.99 3/417 23251"; + } + }; + + final double[] systemLoadAverage = probe.getSystemLoadAverage(); + + assertNotNull(systemLoadAverage); + assertThat(systemLoadAverage.length, equalTo(3)); + + // avoid silliness with representing doubles + assertThat(systemLoadAverage[0], equalTo(Double.parseDouble("1.51"))); + assertThat(systemLoadAverage[1], equalTo(Double.parseDouble("1.69"))); + assertThat(systemLoadAverage[2], equalTo(Double.parseDouble("1.99"))); + } + + public void testCgroupProbe() { + assumeTrue("test runs on Linux only", Constants.LINUX); + + final boolean areCgroupStatsAvailable = randomBoolean(); + final String hierarchy = randomAsciiOfLength(16); + + final OsProbe probe = new OsProbe() { + + @Override + List<String> readProcSelfCgroup() { + return Arrays.asList( + "11:freezer:/", + "10:net_cls,net_prio:/", + "9:pids:/", + "8:cpuset:/", + "7:blkio:/", + "6:memory:/", + "5:devices:/user.slice", + "4:hugetlb:/", + "3:perf_event:/", + "2:cpu,cpuacct:/" + hierarchy, + "1:name=systemd:/user.slice/user-1000.slice/session-2359.scope"); + } + + @Override + String readSysFsCgroupCpuAcctCpuAcctUsage(String controlGroup) { + assertThat(controlGroup, equalTo("/" + hierarchy)); + return "364869866063112"; + } + + @Override + String readSysFsCgroupCpuAcctCpuCfsPeriod(String controlGroup) { + assertThat(controlGroup, equalTo("/" + hierarchy)); + return "100000"; + } + + @Override + String readSysFsCgroupCpuAcctCpuAcctCfsQuota(String controlGroup) { + assertThat(controlGroup, equalTo("/" + hierarchy)); + return "50000"; + } + + @Override + List<String> readSysFsCgroupCpuAcctCpuStat(String controlGroup) { +
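+ // Editorial note, not part of the original change: cpu.stat is a flat key/value listing, and the three keys stubbed in the return below are the kernel's CFS bandwidth counters: nr_periods (elapsed enforcement intervals), nr_throttled (how many of those intervals throttled the group), and throttled_time (total nanoseconds spent throttled), which OsProbe surfaces as OsStats.Cgroup.CpuStat.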
return Arrays.asList( + "nr_periods 17992", + "nr_throttled 1311", + "throttled_time 139298645489"); + } + + @Override + protected boolean areCgroupStatsAvailable() { + return areCgroupStatsAvailable; + } + + }; + + final OsStats.Cgroup cgroup = probe.osStats().getCgroup(); + + if (areCgroupStatsAvailable) { + assertNotNull(cgroup); + assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); + assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063112L)); + assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); + assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); + assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); + assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); + assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); + assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645489L)); + } else { + assertNull(cgroup); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java b/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java index 30d527311b3..8334f71e86a 100644 --- a/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java @@ -36,7 +36,14 @@ public class OsStatsTests extends ESTestCase { OsStats.Cpu cpu = new OsStats.Cpu(randomShort(), loadAverages); OsStats.Mem mem = new OsStats.Mem(randomLong(), randomLong()); OsStats.Swap swap = new OsStats.Swap(randomLong(), randomLong()); - OsStats osStats = new OsStats(System.currentTimeMillis(), cpu, mem, swap); + OsStats.Cgroup cgroup = new OsStats.Cgroup( + randomAsciiOfLength(8), + randomPositiveLong(), + randomAsciiOfLength(8), + randomPositiveLong(), + randomPositiveLong(), + new OsStats.Cgroup.CpuStat(randomPositiveLong(), randomPositiveLong(), randomPositiveLong())); + OsStats osStats = new OsStats(System.currentTimeMillis(), cpu, mem, swap, cgroup); try (BytesStreamOutput out = new BytesStreamOutput()) { osStats.writeTo(out); @@ -49,7 +56,22 @@ public class OsStatsTests extends ESTestCase { assertEquals(osStats.getMem().getTotal(), deserializedOsStats.getMem().getTotal()); assertEquals(osStats.getSwap().getFree(), deserializedOsStats.getSwap().getFree()); assertEquals(osStats.getSwap().getTotal(), deserializedOsStats.getSwap().getTotal()); + assertEquals(osStats.getCgroup().getCpuAcctControlGroup(), deserializedOsStats.getCgroup().getCpuAcctControlGroup()); + assertEquals(osStats.getCgroup().getCpuAcctUsageNanos(), deserializedOsStats.getCgroup().getCpuAcctUsageNanos()); + assertEquals(osStats.getCgroup().getCpuControlGroup(), deserializedOsStats.getCgroup().getCpuControlGroup()); + assertEquals(osStats.getCgroup().getCpuCfsPeriodMicros(), deserializedOsStats.getCgroup().getCpuCfsPeriodMicros()); + assertEquals(osStats.getCgroup().getCpuCfsQuotaMicros(), deserializedOsStats.getCgroup().getCpuCfsQuotaMicros()); + assertEquals( + osStats.getCgroup().getCpuStat().getNumberOfElapsedPeriods(), + deserializedOsStats.getCgroup().getCpuStat().getNumberOfElapsedPeriods()); + assertEquals( + osStats.getCgroup().getCpuStat().getNumberOfTimesThrottled(), + deserializedOsStats.getCgroup().getCpuStat().getNumberOfTimesThrottled()); + assertEquals( + osStats.getCgroup().getCpuStat().getTimeThrottledNanos(), + deserializedOsStats.getCgroup().getCpuStat().getTimeThrottledNanos()); } } } + } diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java 
b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index d9134ba5cf3..59077367904 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -106,7 +105,7 @@ public class NodeInfoStreamingTests extends ESTestCase { private static NodeInfo createNodeInfo() { Build build = Build.CURRENT; - DiscoveryNode node = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(), + DiscoveryNode node = new DiscoveryNode("test_node", buildNewFakeTransportAddress(), emptyMap(), emptySet(), VersionUtils.randomVersion(random())); Settings settings = randomBoolean() ? null : Settings.builder().put("test", "setting").build(); OsInfo osInfo = null; @@ -133,7 +132,7 @@ public class NodeInfoStreamingTests extends ESTestCase { } Map<String, BoundTransportAddress> profileAddresses = new HashMap<>(); BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress( - new TransportAddress[]{LocalTransportAddress.buildUnique()}, LocalTransportAddress.buildUnique()); + new TransportAddress[]{buildNewFakeTransportAddress()}, buildNewFakeTransportAddress()); profileAddresses.put("test_address", dummyBoundTransportAddress); TransportInfo transport = randomBoolean() ? null : new TransportInfo(dummyBoundTransportAddress, profileAddresses); HttpInfo httpInfo = randomBoolean() ? null : new HttpInfo(dummyBoundTransportAddress, randomLong()); diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index 8a4c50f8a9d..2147cea696d 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -36,9 +36,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ @ClusterScope(scope= Scope.TEST, numDataNodes = 0) public class SimpleNodesInfoIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java new file mode 100644 index 00000000000..fea2e4699d5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.operateAllIndices; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.After; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class DestructiveOperationsIT extends ESIntegTestCase { + + @After + public void afterTest() { + Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), (String)null).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + public void testDeleteIndexIsRejected() throws Exception { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + + createIndex("index1", "1index"); + + // Should succeed, since no wildcards + assertAcked(client().admin().indices().prepareDelete("1index").get()); + + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareDelete("i*").get()); + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareDelete("_all").get()); + } + + public void testDeleteIndexDefaultBehaviour() throws Exception { + if (randomBoolean()) { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + createIndex("index1", "1index"); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareDelete("_all").get()); + } else { + assertAcked(client().admin().indices().prepareDelete("*").get()); + } + + assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); + } + + public void testCloseIndexIsRejected() throws Exception { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + + createIndex("index1", "1index"); + + // Should succeed, since no wildcards + assertAcked(client().admin().indices().prepareClose("1index").get()); + + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareClose("i*").get()); + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareClose("_all").get()); + } + + public void testCloseIndexDefaultBehaviour() throws Exception { + if (randomBoolean()) { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) + .build(); + 
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + createIndex("index1", "1index"); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareClose("_all").get()); + } else { + assertAcked(client().admin().indices().prepareClose("*").get()); + } + + ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (ObjectObjectCursor<String, IndexMetaData> indexMetaDataObjectObjectCursor : state.getMetaData().indices()) { + assertEquals(IndexMetaData.State.CLOSE, indexMetaDataObjectObjectCursor.value.getState()); + } + } + + public void testOpenIndexIsRejected() throws Exception { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + + createIndex("index1", "1index"); + assertAcked(client().admin().indices().prepareClose("1index", "index1").get()); + + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareOpen("i*").get()); + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareOpen("_all").get()); + } + + public void testOpenIndexDefaultBehaviour() throws Exception { + if (randomBoolean()) { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + createIndex("index1", "1index"); + assertAcked(client().admin().indices().prepareClose("1index", "index1").get()); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareOpen("_all").get()); + } else { + assertAcked(client().admin().indices().prepareOpen("*").get()); + } + + ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (ObjectObjectCursor<String, IndexMetaData> indexMetaDataObjectObjectCursor : state.getMetaData().indices()) { + assertEquals(IndexMetaData.State.OPEN, indexMetaDataObjectObjectCursor.value.getState()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java deleted file mode 100644 index 83f600a8c2f..00000000000 --- a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.operateAllIndices; - -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -/** - */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { - // One test for test performance, since cluster scope is test - // The cluster scope is test b/c we can't clear cluster settings. - public void testDestructiveOperations() throws Exception { - Settings settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareCreate("index1").get()); - assertAcked(client().admin().indices().prepareCreate("1index").get()); - - // Should succeed, since no wildcards - assertAcked(client().admin().indices().prepareDelete("1index").get()); - - try { - // should fail since index1 is the only index. - client().admin().indices().prepareDelete("i*").get(); - fail(); - } catch (IllegalArgumentException e) { - } - - try { - client().admin().indices().prepareDelete("_all").get(); - fail(); - } catch (IllegalArgumentException e) { - } - - settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareDelete("_all").get()); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); - - // end delete index: - // close index: - settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareCreate("index1").get()); - assertAcked(client().admin().indices().prepareCreate("1index").get()); - // Should succeed, since no wildcards - assertAcked(client().admin().indices().prepareClose("1index").get()); - - try { - client().admin().indices().prepareClose("_all").get(); - fail(); - } catch (IllegalArgumentException e) { - } - try { - assertAcked(client().admin().indices().prepareOpen("_all").get()); - fail(); - } catch (IllegalArgumentException e) { - } - try { - client().admin().indices().prepareClose("*").get(); - fail(); - } catch (IllegalArgumentException e) { - } - try { - assertAcked(client().admin().indices().prepareOpen("*").get()); - fail(); - } catch (IllegalArgumentException e) { - } - - settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - assertAcked(client().admin().indices().prepareClose("_all").get()); - assertAcked(client().admin().indices().prepareOpen("_all").get()); - - // end close index: - client().admin().indices().prepareDelete("_all").get(); - } -} diff --git a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java index 75904e69c25..4a61bebd4db 100644 --- 
a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -39,9 +39,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -/** - * - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) public class FullRollingRestartIT extends ESIntegTestCase { protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) { @@ -152,7 +149,8 @@ public class FullRollingRestartIT extends ESIntegTestCase { ClusterState state = client().admin().cluster().prepareState().get().getState(); RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { - assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); + assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state, + recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); } internalCluster().restartRandomDataNode(); ensureGreen(); @@ -160,7 +158,8 @@ public class FullRollingRestartIT extends ESIntegTestCase { recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { - assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); + assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state, + recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); } } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java deleted file mode 100644 index 26c22fc3bb0..00000000000 --- a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.recovery; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.util.concurrent.TimeUnit; - -public class RecoverySettingsTests extends ESSingleNodeTestCase { - @Override - protected boolean resetNodeAfterTest() { - return true; - } - - public void testAllSettingsAreDynamicallyUpdatable() { - innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), 0, new Validator() { - @Override - public void validate(RecoverySettings recoverySettings, int expectedValue) { - assertEquals(null, recoverySettings.rateLimiter()); - } - }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { - @Override - public void validate(RecoverySettings recoverySettings, int expectedValue) { - assertEquals(expectedValue, recoverySettings.retryDelayStateSync().millis()); - } - }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { - @Override - public void validate(RecoverySettings recoverySettings, int expectedValue) { - assertEquals(expectedValue, recoverySettings.retryDelayNetwork().millis()); - } - }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { - @Override - public void validate(RecoverySettings recoverySettings, int expectedValue) { - assertEquals(expectedValue, recoverySettings.activityTimeout().millis()); - } - }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { - @Override - public void validate(RecoverySettings recoverySettings, int expectedValue) { - assertEquals(expectedValue, recoverySettings.internalActionTimeout().millis()); - } - }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { - @Override - public void validate(RecoverySettings recoverySettings, int expectedValue) { - assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis()); - } - }); - } - - private static class Validator { - public void validate(RecoverySettings recoverySettings, int expectedValue) { - } - - public void validate(RecoverySettings recoverySettings, boolean expectedValue) { - } - } - - private void innerTestSettings(String key, int newValue, Validator validator) { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue)).get(); - validator.validate(getInstanceFromNode(RecoverySettings.class), newValue); - } - - private void innerTestSettings(String key, int newValue, TimeUnit timeUnit, Validator validator) { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue, timeUnit)).get(); - validator.validate(getInstanceFromNode(RecoverySettings.class), newValue); - } - - private void innerTestSettings(String key, int newValue, ByteSizeUnit byteSizeUnit, Validator validator) { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue, byteSizeUnit)).get(); - 
validator.validate(getInstanceFromNode(RecoverySettings.class), newValue); - } - - private void innerTestSettings(String key, boolean newValue, Validator validator) { - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue)).get(); - validator.validate(getInstanceFromNode(RecoverySettings.class), newValue); - } - -} diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 978a30920d0..205f2bbfada 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -97,8 +97,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -/** - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) @TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.index.shard.service:TRACE") @LuceneTestCase.AwaitsFix(bugUrl = "primary relocation needs to transfer the global check point. otherwise the new primary sends a " + diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 339d7d6d527..b81de043921 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -30,10 +30,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.recovery.IndexRecoveryIT; -import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; +import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -51,6 +50,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -61,7 +61,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, RecoverySettingsChunkSizePlugin.class); } /** @@ -71,9 +71,9 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { * Later we allow full recovery to ensure we can still recover and don't run into corruptions.
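* (Editorial note, not part of the original change: judging from the imports above, the truncation is simulated through MockTransportService by tampering with a RecoveryFileChunkRequest in flight rather than by corrupting files on disk, and the transient CHUNK_SIZE_SETTING update at the start of the test shrinks recovery chunks so the targeted chunk is easy to hit.)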
*/ public void testCancelRecoveryAndResume() throws Exception { - for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) { - IndexRecoveryIT.setChunkSize(settings, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)); - } + assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() + .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))) + .get().isAcknowledged()); NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); List<NodeStats> dataNodeStats = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java new file mode 100644 index 00000000000..ef67006c204 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Table; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.core.StringContains.containsString; +import static org.hamcrest.object.HasToString.hasToString; +import static org.mockito.Mockito.mock; + +public class BaseRestHandlerTests extends ESTestCase { + + public void testOneUnconsumedParameters() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + request.param("consumed"); + return channel -> executed.set(true); + } + }; + + final HashMap<String, String> params = new HashMap<>(); + params.put("consumed", randomAsciiOfLength(8)); + params.put("unconsumed", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); + assertThat(e, hasToString(containsString("request [/] contains unrecognized parameter: [unconsumed]"))); + assertFalse(executed.get()); + } + + public void testMultipleUnconsumedParameters()
throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + request.param("consumed"); + return channel -> executed.set(true); + } + }; + + final HashMap<String, String> params = new HashMap<>(); + params.put("consumed", randomAsciiOfLength(8)); + params.put("unconsumed-first", randomAsciiOfLength(8)); + params.put("unconsumed-second", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); + assertThat(e, hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first], [unconsumed-second]"))); + assertFalse(executed.get()); + } + + public void testUnconsumedParametersDidYouMean() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + request.param("consumed"); + request.param("field"); + request.param("tokenizer"); + request.param("very_close_to_parameter_1"); + request.param("very_close_to_parameter_2"); + return channel -> executed.set(true); + } + + @Override + protected Set<String> responseParams() { + return Collections.singleton("response_param"); + } + }; + + final HashMap<String, String> params = new HashMap<>(); + params.put("consumed", randomAsciiOfLength(8)); + params.put("flied", randomAsciiOfLength(8)); + params.put("respones_param", randomAsciiOfLength(8)); + params.put("tokenzier", randomAsciiOfLength(8)); + params.put("very_close_to_parametre", randomAsciiOfLength(8)); + params.put("very_far_from_every_consumed_parameter", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); + assertThat( + e, + hasToString(containsString( + "request [/] contains unrecognized parameters: " + + "[flied] -> did you mean [field]?, " + + "[respones_param] -> did you mean [response_param]?, " + + "[tokenzier] -> did you mean [tokenizer]?, " + + "[very_close_to_parametre] -> did you mean any of [very_close_to_parameter_1, very_close_to_parameter_2]?, " + + "[very_far_from_every_consumed_parameter]"))); + assertFalse(executed.get()); + } + + public void testUnconsumedResponseParameters() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + request.param("consumed"); + return channel -> executed.set(true); + } + + @Override + protected Set<String> responseParams() { + return Collections.singleton("response_param"); + } + }; + + final HashMap<String, String> params = new HashMap<>(); + params.put("consumed", randomAsciiOfLength(8)); + params.put("response_param", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); +
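+ // Editorial note, not part of the original change: `response_param` is declared in responseParams() above, marking it as a response-scoped parameter, so BaseRestHandler's unrecognized-parameter check skips it and, unlike the earlier tests in this class, this request is expected to execute successfully.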
RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + handler.handleRequest(request, channel, mock(NodeClient.class)); + assertTrue(executed.get()); + } + + public void testDefaultResponseParameters() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + return channel -> executed.set(true); + } + }; + + final HashMap<String, String> params = new HashMap<>(); + params.put("format", randomAsciiOfLength(8)); + params.put("filter_path", randomAsciiOfLength(8)); + params.put("pretty", randomAsciiOfLength(8)); + params.put("human", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + handler.handleRequest(request, channel, mock(NodeClient.class)); + assertTrue(executed.get()); + } + + public void testCatResponseParameters() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + AbstractCatAction handler = new AbstractCatAction(Settings.EMPTY) { + @Override + protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { + return channel -> executed.set(true); + } + + @Override + protected void documentation(StringBuilder sb) { + + } + + @Override + protected Table getTableWithHeader(RestRequest request) { + return null; + } + }; + + final HashMap<String, String> params = new HashMap<>(); + params.put("format", randomAsciiOfLength(8)); + params.put("h", randomAsciiOfLength(8)); + params.put("v", randomAsciiOfLength(8)); + params.put("ts", randomAsciiOfLength(8)); + params.put("pri", randomAsciiOfLength(8)); + params.put("bytes", randomAsciiOfLength(8)); + params.put("size", randomAsciiOfLength(8)); + params.put("time", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + handler.handleRequest(request, channel, mock(NodeClient.class)); + assertTrue(executed.get()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java b/core/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java new file mode 100644 index 00000000000..2bc0d0bdc81 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.rest.action; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponse.Empty; + +import java.util.concurrent.atomic.AtomicReference; + +public class RestBuilderListenerTests extends ESTestCase { + + public void testXContentBuilderClosedInBuildResponse() throws Exception { + AtomicReference<XContentBuilder> builderAtomicReference = new AtomicReference<>(); + RestBuilderListener<Empty> builderListener = + new RestBuilderListener<Empty>(new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1)) { + @Override + public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { + builderAtomicReference.set(builder); + builder.close(); + return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY); + } + }; + + builderListener.buildResponse(Empty.INSTANCE); + assertNotNull(builderAtomicReference.get()); + assertTrue(builderAtomicReference.get().generator().isClosed()); + } + + public void testXContentBuilderNotClosedInBuildResponseAssertionsDisabled() throws Exception { + AtomicReference<XContentBuilder> builderAtomicReference = new AtomicReference<>(); + RestBuilderListener<Empty> builderListener = + new RestBuilderListener<Empty>(new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1)) { + @Override + public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { + builderAtomicReference.set(builder); + return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY); + } + + @Override + boolean assertBuilderClosed(XContentBuilder xContentBuilder) { + // don't check the actual builder being closed so we can test auto close + return true; + } + }; + + builderListener.buildResponse(Empty.INSTANCE); + assertNotNull(builderAtomicReference.get()); + assertTrue(builderAtomicReference.get().generator().isClosed()); + } + + public void testXContentBuilderNotClosedInBuildResponseAssertionsEnabled() throws Exception { + assumeTrue("tests are not being run with assertions", RestBuilderListener.class.desiredAssertionStatus()); + + RestBuilderListener<Empty> builderListener = + new RestBuilderListener<Empty>(new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1)) { + @Override + public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY); + } + }; + + AssertionError error = expectThrows(AssertionError.class, () -> builderListener.buildResponse(Empty.INSTANCE)); + assertEquals("callers should ensure the XContentBuilder is closed themselves", error.getMessage()); + } +} diff --git a/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java index bef1ed44ac6..449f5852cfa 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/RestMainActionTests.java @@ -61,9 +61,9 @@ public class RestMainActionTests extends ESTestCase { BytesRestResponse response =
RestMainAction.convertMainResponse(mainResponse, restRequest, builder); assertNotNull(response); assertEquals(expectedStatus, response.status()); - assertEquals(0, response.content().length()); - assertEquals(0, builder.bytes().length()); + // the empty responses are handled in the HTTP layer so we do + // not assert on them here } public void testGetResponse() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java index 9b7d4073d0d..385bfd17b1d 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java @@ -118,7 +118,7 @@ public class RestAnalyzeActionTests extends ESTestCase { assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'")); } - public void testDeprecatedParamException() throws Exception { + public void testDeprecatedParamIn2xException() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent( XContentFactory.jsonBuilder() @@ -165,5 +165,4 @@ public class RestAnalyzeActionTests extends ESTestCase { , new AnalyzeRequest("for test"), new ParseFieldMatcher(Settings.EMPTY))); assertThat(e.getMessage(), startsWith("Unknown parameter [token_filter]")); } - } diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java index f7dc6149052..ba3ec55ab2a 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestTableTests.java @@ -23,12 +23,12 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.cat.RestTable; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -62,11 +62,13 @@ public class RestTableTests extends ESTestCase { " invalidAliasesBulk: \"foo\"\n" + " timestamp: \"foo\"\n" + " epoch: \"foo\"\n"; - private Table table = new Table(); - private FakeRestRequest restRequest = new FakeRestRequest(); + private Table table; + private FakeRestRequest restRequest; @Before public void setup() { + restRequest = new FakeRestRequest(); + table = new Table(); table.startHeaders(); table.addCell("bulk.foo", "alias:f;desc:foo"); table.addCell("bulk.bar", "alias:b;desc:bar"); @@ -146,6 +148,110 @@ public class RestTableTests extends ESTestCase { assertThat(headerNames, not(hasItem("epoch"))); } + public void testCompareRow() { + Table table = new Table(); + table.startHeaders(); + table.addCell("compare"); + table.endHeaders(); + + for (Integer i : Arrays.asList(1,2,1)) { + table.startRow(); + table.addCell(i); + table.endRow(); + } + + RestTable.TableIndexComparator comparator = new RestTable.TableIndexComparator(table, + Collections.singletonList(new RestTable.ColumnOrderElement("compare", false))); + assertTrue(comparator.compare(0,1) < 0); + assertTrue(comparator.compare(0,2) == 0); + assertTrue(comparator.compare(1,2) > 0); + + 
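+        // TableIndexComparator behaves as a comparator over row indices: with
+        // ColumnOrderElement("compare", false) the order is ascending, so for the
+        // rows (1, 2, 1) above, compare(0,1) is negative (1 < 2), compare(0,2) is
+        // zero (1 == 1), and compare(1,2) is positive (2 > 1). Setting the reverse
+        // flag to true below inverts each sign.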
RestTable.TableIndexComparator reverseComparator = new RestTable.TableIndexComparator(table, + Collections.singletonList(new RestTable.ColumnOrderElement("compare", true))); + + assertTrue(reverseComparator.compare(0,1) > 0); + assertTrue(reverseComparator.compare(0,2) == 0); + assertTrue(reverseComparator.compare(1,2) < 0); + } + + public void testRowOutOfBounds() { + Table table = new Table(); + table.startHeaders(); + table.addCell("compare"); + table.endHeaders(); + RestTable.TableIndexComparator comparator = new RestTable.TableIndexComparator(table, + Collections.singletonList(new RestTable.ColumnOrderElement("compare", false))); + Error e = expectThrows(AssertionError.class, () -> { + comparator.compare(0,1); + }); + assertEquals("Invalid comparison of indices (0, 1): Table has 0 rows.", e.getMessage()); + } + + public void testUnknownHeader() { + Table table = new Table(); + table.startHeaders(); + table.addCell("compare"); + table.endHeaders(); + restRequest.params().put("s", "notaheader"); + Exception e = expectThrows(UnsupportedOperationException.class, () -> RestTable.getRowOrder(table, restRequest)); + assertEquals("Unable to sort by unknown sort key `notaheader`", e.getMessage()); + } + + public void testAliasSort() { + Table table = new Table(); + table.startHeaders(); + table.addCell("compare", "alias:c;"); + table.endHeaders(); + List comparisonList = Arrays.asList(3,1,2); + for (int i = 0; i < comparisonList.size(); i++) { + table.startRow(); + table.addCell(comparisonList.get(i)); + table.endRow(); + } + restRequest.params().put("s", "c"); + List rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(1,2,0), rowOrder); + } + + public void testReversedSort() { + Table table = new Table(); + table.startHeaders(); + table.addCell("reversed"); + table.endHeaders(); + List comparisonList = Arrays.asList(0, 1, 2); + for (int i = 0; i < comparisonList.size(); i++) { + table.startRow(); + table.addCell(comparisonList.get(i)); + table.endRow(); + } + restRequest.params().put("s", "reversed:desc"); + List rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(2,1,0), rowOrder); + } + + public void testMultiSort() { + Table table = new Table(); + table.startHeaders(); + table.addCell("compare"); + table.addCell("second.compare"); + table.endHeaders(); + List comparisonList = Arrays.asList(3, 3, 2); + List secondComparisonList = Arrays.asList(2, 1, 3); + for (int i = 0; i < comparisonList.size(); i++) { + table.startRow(); + table.addCell(comparisonList.get(i)); + table.addCell(secondComparisonList.get(i)); + table.endRow(); + } + restRequest.params().put("s", "compare,second.compare"); + List rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(2,1,0), rowOrder); + + restRequest.params().put("s", "compare:desc,second.compare"); + rowOrder = RestTable.getRowOrder(table, restRequest); + assertEquals(Arrays.asList(1,0,2), rowOrder); + } + private RestResponse assertResponseContentType(Map headers, String mediaType) throws Exception { FakeRestRequest requestWithAcceptHeader = new FakeRestRequest.Builder().withHeaders(headers).build(); table.startRow(); diff --git a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index c60e9e06d16..fdfcc4fcb91 100644 --- a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ 
-37,9 +37,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - * - */ public class AliasResolveRoutingIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index d8cf1e7b5ec..2490134db4e 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.routing; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -259,7 +260,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("index")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -280,7 +281,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("update")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); @@ -301,7 +302,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : bulkResponse) { assertThat(bulkItemResponse.isFailed(), equalTo(true)); - assertThat(bulkItemResponse.getOpType(), equalTo("delete")); + assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE)); assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST)); assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class)); assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]")); diff --git a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java index 954de105a4f..f5ae13cf3a0 100644 --- a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java @@ -26,8 +26,6 @@ import org.elasticsearch.test.ESTestCase; import java.nio.file.Files; import java.nio.file.Path; import java.util.Collections; -import java.util.HashSet; -import java.util.Set; // TODO: these really should just be part of ScriptService tests, there is nothing special about them public class FileScriptTests extends ESTestCase { @@ -56,7 +54,7 @@ public class FileScriptTests extends ESTestCase { 
Settings settings = Settings.builder() .put("script.engine." + MockScriptEngine.NAME + ".file.aggs", "false").build(); ScriptService scriptService = makeScriptService(settings); - Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); + Script script = new Script(ScriptType.FILE, MockScriptEngine.NAME, "script1", Collections.emptyMap()); CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertNotNull(compiledScript); MockCompiledScript executable = (MockCompiledScript) compiledScript.compiled(); @@ -71,7 +69,7 @@ public class FileScriptTests extends ESTestCase { .put("script.engine." + MockScriptEngine.NAME + ".file.update", "false") .put("script.engine." + MockScriptEngine.NAME + ".file.ingest", "false").build(); ScriptService scriptService = makeScriptService(settings); - Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); + Script script = new Script(ScriptType.FILE, MockScriptEngine.NAME, "script1", Collections.emptyMap()); for (ScriptContext context : ScriptContext.Standard.values()) { try { scriptService.compile(script, context, Collections.emptyMap()); diff --git a/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java b/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java index 7e57d41acea..f41d9d7c394 100644 --- a/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java +++ b/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java @@ -48,7 +48,7 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; import static java.util.Collections.emptyList; -import static org.elasticsearch.script.ScriptService.ScriptType; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; @@ -683,7 +683,7 @@ public class IndexLookupIT extends ESIntegTestCase { } private Script createScript(String script) { - return new Script(script, ScriptType.INLINE, CustomScriptPlugin.NAME, null); + return new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, script, Collections.emptyMap()); } public void testFlags() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 1ee4c58455b..5423103f92d 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; -import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.watcher.ResourceWatcherService; @@ -54,8 +53,7 @@ public class NativeScriptTests extends ESTestCase { scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED); ExecutableScript executable = scriptModule.getScriptService().executable( - new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), ScriptContext.Standard.SEARCH, - Collections.emptyMap()); + new Script(ScriptType.INLINE, NativeScriptEngineService.NAME, "my", Collections.emptyMap()), ScriptContext.Standard.SEARCH); 
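+        // The constructor migration applied throughout this change: the old form was
+        //   new Script(idOrCode, ScriptType, lang, params)   // params could be null
+        // and the new form is
+        //   new Script(ScriptType, lang, idOrCode, params)   // params passed explicitly,
+        // which is why every migrated call site now supplies Collections.emptyMap()
+        // rather than null.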
assertThat(executable.run().toString(), equalTo("test")); } @@ -63,7 +61,7 @@ public class NativeScriptTests extends ESTestCase { Settings.Builder builder = Settings.builder(); if (randomBoolean()) { ScriptType scriptType = randomFrom(ScriptType.values()); - builder.put("script" + "." + scriptType.getScriptType(), randomBoolean()); + builder.put("script" + "." + scriptType.getName(), randomBoolean()); } else { ScriptContext scriptContext = randomFrom(ScriptContext.Standard.values()); builder.put("script" + "." + scriptContext.getKey(), randomBoolean()); @@ -81,8 +79,8 @@ public class NativeScriptTests extends ESTestCase { scriptContextRegistry, scriptSettings); for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { - assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext, - Collections.emptyMap()), notNullValue()); + assertThat(scriptService.compile(new Script(ScriptType.INLINE, NativeScriptEngineService.NAME, "my", Collections.emptyMap()), + scriptContext, Collections.emptyMap()), notNullValue()); } } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java index 16a1c20792f..9578e0c6281 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java @@ -25,9 +25,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Set; import static org.hamcrest.Matchers.containsString; @@ -58,9 +56,9 @@ public class ScriptContextTests extends ESTestCase { public void testCustomGlobalScriptContextSettings() throws Exception { ScriptService scriptService = makeScriptService(); - for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { + for (ScriptType scriptType : ScriptType.values()) { try { - Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); + Script script = new Script(scriptType, MockScriptEngine.NAME, "1", Collections.emptyMap()); scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalStateException e) { @@ -71,7 +69,7 @@ public class ScriptContextTests extends ESTestCase { public void testCustomScriptContextSettings() throws Exception { ScriptService scriptService = makeScriptService(); - Script script = new Script("1", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, null); + Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap()); try { scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), Collections.emptyMap()); fail("script compilation should have been rejected"); @@ -87,9 +85,9 @@ public class ScriptContextTests extends ESTestCase { public void testUnknownPluginScriptContext() throws Exception { ScriptService scriptService = makeScriptService(); - for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { + for (ScriptType scriptType : ScriptType.values()) { try { - Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); + Script script = new Script(scriptType, MockScriptEngine.NAME, "1", Collections.emptyMap()); scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), 
Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { @@ -106,9 +104,9 @@ public class ScriptContextTests extends ESTestCase { } }; ScriptService scriptService = makeScriptService(); - for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { + for (ScriptType scriptType : ScriptType.values()) { try { - Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); + Script script = new Script(scriptType, MockScriptEngine.NAME, "1", Collections.emptyMap()); scriptService.compile(script, context, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { diff --git a/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java b/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java index b15a2ab7be6..1a4a58e4b1e 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptFieldIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -32,6 +31,7 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -61,10 +61,10 @@ public class ScriptFieldIT extends ESIntegTestCase { client().admin().indices().prepareFlush("test").execute().actionGet(); SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .addScriptField("int", new Script("int", ScriptType.INLINE, "native", null)) - .addScriptField("float", new Script("float", ScriptType.INLINE, "native", null)) - .addScriptField("double", new Script("double", ScriptType.INLINE, "native", null)) - .addScriptField("long", new Script("long", ScriptType.INLINE, "native", null)).execute().actionGet(); + .addScriptField("int", new Script(ScriptType.INLINE, "native", "int", Collections.emptyMap())) + .addScriptField("float", new Script(ScriptType.INLINE, "native", "float", Collections.emptyMap())) + .addScriptField("double", new Script(ScriptType.INLINE, "native", "double", Collections.emptyMap())) + .addScriptField("long", new Script(ScriptType.INLINE, "native", "long", Collections.emptyMap())).execute().actionGet(); assertThat(sr.getHits().hits().length, equalTo(6)); for (SearchHit hit : sr.getHits().getHits()) { Object result = hit.getFields().get("int").getValues().get(0); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java index e4a1835ab63..f6a02ae9206 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -126,7 +125,7 @@ public class 
ScriptModesTests extends ESTestCase { ScriptType[] randomScriptTypes = randomScriptTypesSet.toArray(new ScriptType[randomScriptTypesSet.size()]); Settings.Builder builder = Settings.builder(); for (int i = 0; i < randomInt; i++) { - builder.put("script" + "." + randomScriptTypes[i].getScriptType(), randomScriptModes[i]); + builder.put("script" + "." + randomScriptTypes[i].getName(), randomScriptModes[i]); } this.scriptModes = new ScriptModes(scriptSettings, builder.build()); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index bc7cb9ffb60..fde01e59db2 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.env.Environment; -import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.watcher.ResourceWatcherService; @@ -170,7 +169,7 @@ public class ScriptServiceTests extends ESTestCase { Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); resourceWatcherService.notifyNow(); - CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), + CompiledScript compiledScript = scriptService.compile(new Script(ScriptType.FILE, "test", "test_script", Collections.emptyMap()), ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); @@ -179,7 +178,7 @@ public class ScriptServiceTests extends ESTestCase { resourceWatcherService.notifyNow(); try { - scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, + scriptService.compile(new Script(ScriptType.FILE, "test", "test_script", Collections.emptyMap()), ScriptContext.Standard.SEARCH, Collections.emptyMap()); fail("the script test_script should no longer exist"); } catch (IllegalArgumentException ex) { @@ -197,7 +196,7 @@ public class ScriptServiceTests extends ESTestCase { Streams.copy("test_file_script".getBytes("UTF-8"), Files.newOutputStream(testFileScript)); resourceWatcherService.notifyNow(); - CompiledScript compiledScript = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), + CompiledScript compiledScript = scriptService.compile(new Script(ScriptType.FILE, "test", "file_script", Collections.emptyMap()), ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file_script")); @@ -208,9 +207,9 @@ public class ScriptServiceTests extends ESTestCase { public void testInlineScriptCompiledOnceCache() throws IOException { buildScriptService(Settings.EMPTY); - CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), + CompiledScript compiledScript1 = scriptService.compile(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); - CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), + CompiledScript compiledScript2 = scriptService.compile(new Script(ScriptType.INLINE, "test", 
"1+1", Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -267,9 +266,9 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); for (Map.Entry entry : scriptSourceSettings.entrySet()) { if (entry.getValue()) { - builder.put("script" + "." + entry.getKey().getScriptType(), "true"); + builder.put("script" + "." + entry.getKey().getName(), "true"); } else { - builder.put("script" + "." + entry.getKey().getScriptType(), "false"); + builder.put("script" + "." + entry.getKey().getName(), "false"); } } for (Map.Entry entry : scriptContextSettings.entrySet()) { @@ -333,8 +332,8 @@ public class ScriptServiceTests extends ESTestCase { String type = scriptEngineService.getType(); try { - scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( - pluginName, unknownContext), Collections.emptyMap()); + scriptService.compile(new Script(randomFrom(ScriptType.values()), type, "test", Collections.emptyMap()), + new ScriptContext.Plugin(pluginName, unknownContext), Collections.emptyMap()); fail("script compilation should have been rejected"); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); @@ -343,21 +342,20 @@ public class ScriptServiceTests extends ESTestCase { public void testCompileCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), + scriptService.compile(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testExecutableCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); + scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts)); assertEquals(1L, scriptService.stats().getCompilations()); } public void testSearchCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), - Collections.emptyMap()); + scriptService.search(null, new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts)); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -366,7 +364,7 @@ public class ScriptServiceTests extends ESTestCase { int numberOfCompilations = randomIntBetween(1, 1024); for (int i = 0; i < numberOfCompilations; i++) { scriptService - .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), + .compile(new Script(ScriptType.INLINE, "test", i + " + " + i, Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); } assertEquals(numberOfCompilations, scriptService.stats().getCompilations()); @@ -377,22 +375,22 @@ public class ScriptServiceTests extends ESTestCase { builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), 1); builder.put("script.inline", "true"); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", 
ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); + scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts)); + scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts)); assertEquals(1L, scriptService.stats().getCompilations()); } public void testFileScriptCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); createFileScripts("test"); - scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), + scriptService.compile(new Script(ScriptType.FILE, "test", "file_script", Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testIndexedScriptCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("script", ScriptType.STORED, "test", null), randomFrom(scriptContexts), + scriptService.compile(new Script(ScriptType.STORED, "test", "script", Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -402,8 +400,8 @@ public class ScriptServiceTests extends ESTestCase { builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), 1); builder.put("script.inline", "true"); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); - scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); + scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts)); + scriptService.executable(new Script(ScriptType.INLINE, "test", "2+2", Collections.emptyMap()), randomFrom(scriptContexts)); assertEquals(2L, scriptService.stats().getCompilations()); assertEquals(1L, scriptService.stats().getCacheEvictions()); } @@ -412,7 +410,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put("script.inline", "true"); buildScriptService(builder.build()); - CompiledScript script = scriptService.compile(new Script("1 + 1", ScriptType.INLINE, null, null), + CompiledScript script = scriptService.compile( + new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "1 + 1", Collections.emptyMap()), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(script.lang(), Script.DEFAULT_SCRIPT_LANG); } @@ -494,7 +493,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { try { - scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, Collections.emptyMap()); + scriptService.compile(new Script(scriptType, lang, script, Collections.emptyMap()), scriptContext, Collections.emptyMap()); fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]"); } catch(IllegalStateException e) { //all good @@ -503,7 +502,7 @@ public class ScriptServiceTests extends ESTestCase { private void 
assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { assertThat( - scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, Collections.emptyMap()), + scriptService.compile(new Script(scriptType, lang, script, Collections.emptyMap()), scriptContext, Collections.emptyMap()), notNullValue() ); } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptTests.java b/core/src/test/java/org/elasticsearch/script/ScriptTests.java index 316a1c8451b..c1c25a500b2 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptTests.java @@ -65,10 +65,10 @@ public class ScriptTests extends ESTestCase { } private Script createScript(XContent xContent) throws IOException { - final Map params = randomBoolean() ? null : Collections.singletonMap("key", "value"); - ScriptService.ScriptType scriptType = randomFrom(ScriptService.ScriptType.values()); + final Map params = randomBoolean() ? Collections.emptyMap() : Collections.singletonMap("key", "value"); + ScriptType scriptType = randomFrom(ScriptType.values()); String script; - if (scriptType == ScriptService.ScriptType.INLINE) { + if (scriptType == ScriptType.INLINE) { try (XContentBuilder builder = XContentBuilder.builder(xContent)) { builder.startObject(); builder.field("field", randomAsciiOfLengthBetween(1, 5)); @@ -79,11 +79,12 @@ public class ScriptTests extends ESTestCase { script = randomAsciiOfLengthBetween(1, 5); } return new Script( - script, - scriptType, - randomFrom("_lang1", "_lang2", null), - params, - scriptType == ScriptService.ScriptType.INLINE ? xContent.type() : null + scriptType, + randomFrom("_lang1", "_lang2", "_lang3"), + script, + scriptType == ScriptType.INLINE ? 
+ Collections.singletonMap(Script.CONTENT_TYPE_OPTION, xContent.type().mediaType()) : Collections.emptyMap(), + params ); } diff --git a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java index 048416c25ef..cf290b2b1b6 100644 --- a/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/AbstractSearchTestCase.java @@ -20,36 +20,21 @@ package org.elasticsearch.search; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests; import org.elasticsearch.search.rescore.QueryRescoreBuilderTests; -import org.elasticsearch.search.searchafter.SearchAfterBuilder; -import org.elasticsearch.search.slice.SliceBuilder; -import org.elasticsearch.search.sort.ScriptSortBuilder; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilderTests; -import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -62,12 +47,14 @@ import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.function.Function; +import java.util.function.Supplier; public abstract class AbstractSearchTestCase extends ESTestCase { protected NamedWriteableRegistry namedWriteableRegistry; protected SearchRequestParsers searchRequestParsers; private TestSearchExtPlugin searchExtPlugin; + protected IndicesQueriesRegistry queriesRegistry; public void setUp() throws Exception { super.setUp(); @@ -79,213 +66,11 @@ public abstract class AbstractSearchTestCase extends ESTestCase { entries.addAll(searchModule.getNamedWriteables()); namedWriteableRegistry = new NamedWriteableRegistry(entries); searchRequestParsers = searchModule.getSearchRequestParsers(); + queriesRegistry = searchModule.getQueryParserRegistry(); } - protected SearchSourceBuilder createSearchSourceBuilder() throws IOException { - SearchSourceBuilder builder = new SearchSourceBuilder(); - if (randomBoolean()) { - builder.from(randomIntBetween(0, 10000)); - } - if (randomBoolean()) { - builder.size(randomIntBetween(0, 10000)); - } - if (randomBoolean()) { - builder.explain(randomBoolean()); - 
} - if (randomBoolean()) { - builder.version(randomBoolean()); - } - if (randomBoolean()) { - builder.trackScores(randomBoolean()); - } - if (randomBoolean()) { - builder.minScore(randomFloat() * 1000); - } - if (randomBoolean()) { - builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout")); - } - if (randomBoolean()) { - builder.terminateAfter(randomIntBetween(1, 100000)); - } - - switch(randomInt(2)) { - case 0: - builder.storedFields(); - break; - case 1: - builder.storedField("_none_"); - break; - case 2: - int fieldsSize = randomInt(25); - List fields = new ArrayList<>(fieldsSize); - for (int i = 0; i < fieldsSize; i++) { - fields.add(randomAsciiOfLengthBetween(5, 50)); - } - builder.storedFields(fields); - break; - default: - throw new IllegalStateException(); - } - - if (randomBoolean()) { - int scriptFieldsSize = randomInt(25); - for (int i = 0; i < scriptFieldsSize; i++) { - if (randomBoolean()) { - builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean()); - } else { - builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo")); - } - } - } - if (randomBoolean()) { - FetchSourceContext fetchSourceContext; - int branch = randomInt(5); - String[] includes = new String[randomIntBetween(0, 20)]; - for (int i = 0; i < includes.length; i++) { - includes[i] = randomAsciiOfLengthBetween(5, 20); - } - String[] excludes = new String[randomIntBetween(0, 20)]; - for (int i = 0; i < excludes.length; i++) { - excludes[i] = randomAsciiOfLengthBetween(5, 20); - } - switch (branch) { - case 0: - fetchSourceContext = new FetchSourceContext(randomBoolean()); - break; - case 1: - fetchSourceContext = new FetchSourceContext(includes, excludes); - break; - case 2: - fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)); - break; - case 3: - fetchSourceContext = new FetchSourceContext(true, includes, excludes); - break; - case 4: - fetchSourceContext = new FetchSourceContext(includes); - break; - case 5: - fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20)); - break; - default: - throw new IllegalStateException(); - } - builder.fetchSource(fetchSourceContext); - } - if (randomBoolean()) { - int size = randomIntBetween(0, 20); - List statsGroups = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - statsGroups.add(randomAsciiOfLengthBetween(5, 20)); - } - builder.stats(statsGroups); - } - if (randomBoolean()) { - int indexBoostSize = randomIntBetween(1, 10); - for (int i = 0; i < indexBoostSize; i++) { - builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10); - } - } - if (randomBoolean()) { - builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); - } - if (randomBoolean()) { - builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); - } - if (randomBoolean()) { - int numSorts = randomIntBetween(1, 5); - for (int i = 0; i < numSorts; i++) { - int branch = randomInt(5); - switch (branch) { - case 0: - builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); - break; - case 1: - builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20), - AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values()))); - break; - case 2: - builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - break; - case 3: - 
builder.sort(SortBuilders.scriptSort(new Script("foo"), - ScriptSortBuilder.ScriptSortType.NUMBER).order(randomFrom(SortOrder.values()))); - break; - case 4: - builder.sort(randomAsciiOfLengthBetween(5, 20)); - break; - case 5: - builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values())); - break; - } - } - } - - if (randomBoolean()) { - int numSearchFrom = randomIntBetween(1, 5); - // We build a json version of the search_from first in order to - // ensure that every number type remain the same before/after xcontent (de)serialization. - // This is not a problem because the final type of each field value is extracted from associated sort field. - // This little trick ensure that equals and hashcode are the same when using the xcontent serialization. - XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); - jsonBuilder.startObject(); - jsonBuilder.startArray("search_from"); - for (int i = 0; i < numSearchFrom; i++) { - int branch = randomInt(8); - switch (branch) { - case 0: - jsonBuilder.value(randomInt()); - break; - case 1: - jsonBuilder.value(randomFloat()); - break; - case 2: - jsonBuilder.value(randomLong()); - break; - case 3: - jsonBuilder.value(randomDouble()); - break; - case 4: - jsonBuilder.value(randomAsciiOfLengthBetween(5, 20)); - break; - case 5: - jsonBuilder.value(randomBoolean()); - break; - case 6: - jsonBuilder.value(randomByte()); - break; - case 7: - jsonBuilder.value(randomShort()); - break; - case 8: - jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20))); - break; - } - } - jsonBuilder.endArray(); - jsonBuilder.endObject(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes()); - parser.nextToken(); - parser.nextToken(); - parser.nextToken(); - builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues()); - } - if (randomBoolean()) { - builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder()); - } - if (randomBoolean()) { - builder.suggest(SuggestBuilderTests.randomSuggestBuilder()); - } - if (randomBoolean()) { - int numRescores = randomIntBetween(1, 5); - for (int i = 0; i < numRescores; i++) { - builder.addRescorer(QueryRescoreBuilderTests.randomRescoreBuilder()); - } - } - if (randomBoolean()) { - builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20))); - } - if (randomBoolean()) { + protected SearchSourceBuilder createSearchSourceBuilder() { + Supplier> randomExtBuilders = () -> { Set elementNames = new HashSet<>(searchExtPlugin.getSupportedElements().keySet()); int numSearchExts = randomIntBetween(1, elementNames.size()); while(elementNames.size() > numSearchExts) { @@ -295,51 +80,17 @@ public abstract class AbstractSearchTestCase extends ESTestCase { for (String elementName : elementNames) { searchExtBuilders.add(searchExtPlugin.getSupportedElements().get(elementName).apply(randomAsciiOfLengthBetween(3, 10))); } - builder.ext(searchExtBuilders); - } - if (randomBoolean()) { - String field = randomBoolean() ? 
null : randomAsciiOfLengthBetween(5, 20); - int max = between(2, 1000); - int id = randomInt(max-1); - if (field == null) { - builder.slice(new SliceBuilder(id, max)); - } else { - builder.slice(new SliceBuilder(field, id, max)); - } - } - return builder; + return searchExtBuilders; + }; + return RandomSearchRequestGenerator.randomSearchSourceBuilder( + HighlightBuilderTests::randomHighlighterBuilder, + SuggestBuilderTests::randomSuggestBuilder, + QueryRescoreBuilderTests::randomRescoreBuilder, + randomExtBuilders); } protected SearchRequest createSearchRequest() throws IOException { - SearchRequest searchRequest = new SearchRequest(); - if (randomBoolean()) { - searchRequest.indices(generateRandomStringArray(10, 10, false, false)); - } - if (randomBoolean()) { - searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - } - if (randomBoolean()) { - searchRequest.types(generateRandomStringArray(10, 10, false, false)); - } - if (randomBoolean()) { - searchRequest.preference(randomAsciiOfLengthBetween(3, 10)); - } - if (randomBoolean()) { - searchRequest.requestCache(randomBoolean()); - } - if (randomBoolean()) { - searchRequest.routing(randomAsciiOfLengthBetween(3, 10)); - } - if (randomBoolean()) { - searchRequest.scroll(randomPositiveTimeValue()); - } - if (randomBoolean()) { - searchRequest.searchType(randomFrom(SearchType.values())); - } - if (randomBoolean()) { - searchRequest.source(createSearchSourceBuilder()); - } - return searchRequest; + return RandomSearchRequestGenerator.randomSearchRequest(this::createSearchSourceBuilder); } private static class TestSearchExtPlugin extends Plugin implements SearchPlugin { diff --git a/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index e546130b2e5..2c3676f1317 100644 --- a/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -19,14 +19,12 @@ package org.elasticsearch.search; -import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.search.DefaultSearchContext; import org.elasticsearch.test.ESTestCase; import static org.apache.lucene.search.BooleanClause.Occur.FILTER; @@ -38,13 +36,13 @@ public class DefaultSearchContextTests extends ESTestCase { public void testCreateSearchFilter() { Query searchFilter = DefaultSearchContext.createSearchFilter(new String[]{"type1", "type2"}, null, randomBoolean()); Query expectedQuery = new BooleanQuery.Builder() - .add(new TermsQuery(TypeFieldMapper.NAME, new BytesRef("type1"), new BytesRef("type2")), FILTER) + .add(new TypeFieldMapper.TypesQuery(new BytesRef("type1"), new BytesRef("type2")), FILTER) .build(); assertThat(searchFilter, equalTo(expectedQuery)); searchFilter = DefaultSearchContext.createSearchFilter(new String[]{"type1", "type2"}, new MatchAllDocsQuery(), randomBoolean()); expectedQuery = new BooleanQuery.Builder() - .add(new TermsQuery(TypeFieldMapper.NAME, new BytesRef("type1"), new BytesRef("type2")), FILTER) + .add(new TypeFieldMapper.TypesQuery(new BytesRef("type1"), new BytesRef("type2")), FILTER) .add(new 
MatchAllDocsQuery(), FILTER) .build(); assertThat(searchFilter, equalTo(expectedQuery)); diff --git a/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java b/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java new file mode 100644 index 00000000000..d615e95499d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java @@ -0,0 +1,313 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search; + +import org.elasticsearch.action.ListenableActionFuture; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollAction; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.AbstractSearchScript; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.NativeScriptFactory; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) +public class SearchCancellationIT extends ESIntegTestCase { + + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(ScriptedBlockPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) 
{ + boolean lowLevelCancellation = randomBoolean(); + logger.info("Using lowLevelCancellation: {}", lowLevelCancellation); + return Settings.builder().put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), lowLevelCancellation).build(); + } + + private void indexTestData() { + for (int i = 0; i < 5; i++) { + // Make sure we have a few segments + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int j = 0; j < 20; j++) { + bulkRequestBuilder.add(client().prepareIndex("test", "type", Integer.toString(i * 5 + j)).setSource("field", "value")); + } + assertNoFailures(bulkRequestBuilder.get()); + } + } + + private List initBlockFactory() { + List plugins = new ArrayList<>(); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + plugins.addAll(pluginsService.filterPlugins(ScriptedBlockPlugin.class)); + } + for (ScriptedBlockPlugin plugin : plugins) { + plugin.scriptedBlockFactory.reset(); + plugin.scriptedBlockFactory.enableBlock(); + } + return plugins; + } + + private void awaitForBlock(List plugins) throws Exception { + int numberOfShards = getNumShards("test").numPrimaries; + assertBusy(() -> { + int numberOfBlockedPlugins = 0; + for (ScriptedBlockPlugin plugin : plugins) { + numberOfBlockedPlugins += plugin.scriptedBlockFactory.hits.get(); + } + logger.info("The plugin blocked on {} out of {} shards", numberOfBlockedPlugins, numberOfShards); + assertThat(numberOfBlockedPlugins, greaterThan(0)); + }); + } + + private void disableBlocks(List plugins) throws Exception { + for (ScriptedBlockPlugin plugin : plugins) { + plugin.scriptedBlockFactory.disableBlock(); + } + } + + private void cancelSearch(String action) { + ListTasksResponse listTasksResponse = client().admin().cluster().prepareListTasks().setActions(action).get(); + assertThat(listTasksResponse.getTasks(), hasSize(1)); + TaskInfo searchTask = listTasksResponse.getTasks().get(0); + + logger.info("Cancelling search"); + CancelTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(searchTask.getTaskId()).get(); + assertThat(cancelTasksResponse.getTasks(), hasSize(1)); + assertThat(cancelTasksResponse.getTasks().get(0).getTaskId(), equalTo(searchTask.getTaskId())); + } + + private SearchResponse ensureSearchWasCancelled(ListenableActionFuture searchResponse) { + try { + SearchResponse response = searchResponse.actionGet(); + logger.info("Search response {}", response); + assertNotEquals("At least one shard should have failed", 0, response.getFailedShards()); + return response; + } catch (SearchPhaseExecutionException ex) { + logger.info("All shards failed with", ex); + return null; + } + } + + public void testCancellationDuringQueryPhase() throws Exception { + + List plugins = initBlockFactory(); + indexTestData(); + + logger.info("Executing search"); + ListenableActionFuture searchResponse = client().prepareSearch("test").setQuery( + scriptQuery(new Script( + ScriptType.INLINE, "native", NativeTestScriptedBlockFactory.TEST_NATIVE_BLOCK_SCRIPT, Collections.emptyMap()))) + .execute(); + + awaitForBlock(plugins); + cancelSearch(SearchAction.NAME); + disableBlocks(plugins); + logger.info("Segments {}", XContentHelper.toString(client().admin().indices().prepareSegments("test").get(), FORMAT_PARAMS)); + ensureSearchWasCancelled(searchResponse); + } + + public void testCancellationDuringFetchPhase() throws Exception { + + List plugins = initBlockFactory(); + indexTestData(); + 
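+        // Same choreography as the query-phase test above, except the blocking native
+        // script runs as a script field, i.e. during the fetch phase: initBlockFactory()
+        // arms the block on every data node, awaitForBlock(..) waits until at least one
+        // shard is parked inside the script, cancelSearch(..) cancels the task through
+        // the task management API, and only then does disableBlocks(..) release the
+        // threads so the cancelled search can unwind.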
+ logger.info("Executing search"); + ListenableActionFuture searchResponse = client().prepareSearch("test") + .addScriptField("test_field", + new Script(ScriptType.INLINE, "native", NativeTestScriptedBlockFactory.TEST_NATIVE_BLOCK_SCRIPT, Collections.emptyMap()) + ).execute(); + + awaitForBlock(plugins); + cancelSearch(SearchAction.NAME); + disableBlocks(plugins); + logger.info("Segments {}", XContentHelper.toString(client().admin().indices().prepareSegments("test").get(), FORMAT_PARAMS)); + ensureSearchWasCancelled(searchResponse); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/21126") + public void testCancellationOfScrollSearches() throws Exception { + + List plugins = initBlockFactory(); + indexTestData(); + + logger.info("Executing search"); + ListenableActionFuture searchResponse = client().prepareSearch("test") + .setScroll(TimeValue.timeValueSeconds(10)) + .setSize(5) + .setQuery( + scriptQuery(new Script( + ScriptType.INLINE, "native", NativeTestScriptedBlockFactory.TEST_NATIVE_BLOCK_SCRIPT, Collections.emptyMap()))) + .execute(); + + awaitForBlock(plugins); + cancelSearch(SearchAction.NAME); + disableBlocks(plugins); + ensureSearchWasCancelled(searchResponse); + } + + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/21126") + public void testCancellationOfScrollSearchesOnFollowupRequests() throws Exception { + + List plugins = initBlockFactory(); + indexTestData(); + + // Disable block so the first request would pass + disableBlocks(plugins); + + logger.info("Executing search"); + TimeValue keepAlive = TimeValue.timeValueSeconds(5); + SearchResponse searchResponse = client().prepareSearch("test") + .setScroll(keepAlive) + .setSize(2) + .setQuery( + scriptQuery(new Script( + ScriptType.INLINE, "native", NativeTestScriptedBlockFactory.TEST_NATIVE_BLOCK_SCRIPT, Collections.emptyMap()))) + .get(); + + assertNotNull(searchResponse.getScrollId()); + + // Enable block so the second request would block + for (ScriptedBlockPlugin plugin : plugins) { + plugin.scriptedBlockFactory.reset(); + plugin.scriptedBlockFactory.enableBlock(); + } + + String scrollId = searchResponse.getScrollId(); + logger.info("Executing scroll with id {}", scrollId); + ListenableActionFuture scrollResponse = client().prepareSearchScroll(searchResponse.getScrollId()) + .setScroll(keepAlive).execute(); + + awaitForBlock(plugins); + cancelSearch(SearchScrollAction.NAME); + disableBlocks(plugins); + + SearchResponse response = ensureSearchWasCancelled(scrollResponse); + if (response != null) { + // The response didn't fail completely - update scroll id + scrollId = response.getScrollId(); + } + logger.info("Cleaning scroll with id {}", scrollId); + client().prepareClearScroll().addScrollId(scrollId).get(); + } + + + public static class ScriptedBlockPlugin extends Plugin implements ScriptPlugin { + private NativeTestScriptedBlockFactory scriptedBlockFactory; + + public ScriptedBlockPlugin() { + scriptedBlockFactory = new NativeTestScriptedBlockFactory(); + } + + @Override + public List getNativeScripts() { + return Collections.singletonList(scriptedBlockFactory); + } + } + + private static class NativeTestScriptedBlockFactory implements NativeScriptFactory { + + public static final String TEST_NATIVE_BLOCK_SCRIPT = "native_test_search_block_script"; + + private final AtomicInteger hits = new AtomicInteger(); + + private final AtomicBoolean shouldBlock = new AtomicBoolean(true); + + public NativeTestScriptedBlockFactory() { + } + + public void reset() { + hits.set(0); 
diff --git a/core/src/test/java/org/elasticsearch/search/SearchCancellationTests.java b/core/src/test/java/org/elasticsearch/search/SearchCancellationTests.java
new file mode 100644
index 00000000000..a50b7edf57a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/SearchCancellationTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.search; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.search.query.CancellableCollector; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class SearchCancellationTests extends ESTestCase { + + static Directory dir; + static IndexReader reader; + + @BeforeClass + public static void before() throws IOException { + dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + w.setDoRandomForceMerge(false); // we need 2 segments + indexRandomDocuments(w, TestUtil.nextInt(random(), 2, 20)); + w.flush(); + indexRandomDocuments(w, TestUtil.nextInt(random(), 1, 20)); + reader = w.getReader(); + w.close(); + } + + private static void indexRandomDocuments(RandomIndexWriter w, int numDocs) throws IOException { + for (int i = 0; i < numDocs; ++i) { + final int numHoles = random().nextInt(5); + for (int j = 0; j < numHoles; ++j) { + w.addDocument(new Document()); + } + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Field.Store.NO)); + w.addDocument(doc); + } + } + + @AfterClass + public static void after() throws IOException { + IOUtils.close(reader, dir); + dir = null; + reader = null; + } + + + public void testLowLevelCancellableCollector() throws IOException { + TotalHitCountCollector collector = new TotalHitCountCollector(); + AtomicBoolean cancelled = new AtomicBoolean(); + CancellableCollector cancellableCollector = new CancellableCollector(cancelled::get, true, collector); + final LeafCollector leafCollector = cancellableCollector.getLeafCollector(reader.leaves().get(0)); + leafCollector.collect(0); + cancelled.set(true); + expectThrows(TaskCancelledException.class, () -> leafCollector.collect(1)); + } + + public void testCancellableCollector() throws IOException { + TotalHitCountCollector collector = new TotalHitCountCollector(); + AtomicBoolean cancelled = new AtomicBoolean(); + CancellableCollector cancellableCollector = new CancellableCollector(cancelled::get, false, collector); + final LeafCollector leafCollector = cancellableCollector.getLeafCollector(reader.leaves().get(0)); + leafCollector.collect(0); + cancelled.set(true); + leafCollector.collect(1); + expectThrows(TaskCancelledException.class, () -> cancellableCollector.getLeafCollector(reader.leaves().get(1))); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java index 6f48dbe4911..031cab1286a 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java @@ -26,9 +26,13 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; +import 
org.elasticsearch.common.util.ArrayUtils; import java.io.IOException; -import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public class SearchRequestTests extends AbstractSearchTestCase { @@ -77,95 +81,25 @@ public class SearchRequestTests extends AbstractSearchTestCase { } public void testEqualsAndHashcode() throws IOException { - SearchRequest firstSearchRequest = createSearchRequest(); - assertNotNull("search request is equal to null", firstSearchRequest); - assertNotEquals("search request is equal to incompatible type", firstSearchRequest, ""); - assertEquals("search request is not equal to self", firstSearchRequest, firstSearchRequest); - assertEquals("same source builder's hashcode returns different values if called multiple times", - firstSearchRequest.hashCode(), firstSearchRequest.hashCode()); + checkEqualsAndHashCode(createSearchRequest(), SearchRequestTests::copyRequest, this::mutate); + } - SearchRequest secondSearchRequest = copyRequest(firstSearchRequest); - assertEquals("search request is not equal to self", secondSearchRequest, secondSearchRequest); - assertEquals("search request is not equal to its copy", firstSearchRequest, secondSearchRequest); - assertEquals("search request is not symmetric", secondSearchRequest, firstSearchRequest); - assertEquals("search request copy's hashcode is different from original hashcode", - firstSearchRequest.hashCode(), secondSearchRequest.hashCode()); - - SearchRequest thirdSearchRequest = copyRequest(secondSearchRequest); - assertEquals("search request is not equal to self", thirdSearchRequest, thirdSearchRequest); - assertEquals("search request is not equal to its copy", secondSearchRequest, thirdSearchRequest); - assertEquals("search request copy's hashcode is different from original hashcode", - secondSearchRequest.hashCode(), thirdSearchRequest.hashCode()); - assertEquals("equals is not transitive", firstSearchRequest, thirdSearchRequest); - assertEquals("search request copy's hashcode is different from original hashcode", - firstSearchRequest.hashCode(), thirdSearchRequest.hashCode()); - assertEquals("equals is not symmetric", thirdSearchRequest, secondSearchRequest); - assertEquals("equals is not symmetric", thirdSearchRequest, firstSearchRequest); - - boolean changed = false; - if (randomBoolean()) { - secondSearchRequest.indices(generateRandomStringArray(10, 10, false, false)); - if (Arrays.equals(secondSearchRequest.indices(), firstSearchRequest.indices()) == false) { - changed = true; - } - } - if (randomBoolean()) { - secondSearchRequest.indicesOptions( - IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - if (secondSearchRequest.indicesOptions().equals(firstSearchRequest.indicesOptions()) == false) { - changed = true; - } - } - if (randomBoolean()) { - secondSearchRequest.types(generateRandomStringArray(10, 10, false, false)); - if (Arrays.equals(secondSearchRequest.types(), firstSearchRequest.types()) == false) { - changed = true; - } - } - if (randomBoolean()) { - secondSearchRequest.preference(randomAsciiOfLengthBetween(3, 10)); - if (secondSearchRequest.preference().equals(firstSearchRequest.preference()) == false) { - changed = true; - } - } - if (randomBoolean()) { - secondSearchRequest.routing(randomAsciiOfLengthBetween(3, 10)); - if (secondSearchRequest.routing().equals(firstSearchRequest.routing()) == false) { - changed = true; - } - } - if (randomBoolean()) { 
-            secondSearchRequest.requestCache(randomBoolean());
-            if (secondSearchRequest.requestCache().equals(firstSearchRequest.requestCache()) == false) {
-                changed = true;
-            }
-        }
-        if (randomBoolean()) {
-            secondSearchRequest.scroll(randomPositiveTimeValue());
-            if (secondSearchRequest.scroll().equals(firstSearchRequest.scroll()) == false) {
-                changed = true;
-            }
-        }
-        if (randomBoolean()) {
-            secondSearchRequest.searchType(randomFrom(SearchType.values()));
-            if (secondSearchRequest.searchType() != firstSearchRequest.searchType()) {
-                changed = true;
-            }
-        }
-        if (randomBoolean()) {
-            secondSearchRequest.source(createSearchSourceBuilder());
-            if (secondSearchRequest.source().equals(firstSearchRequest.source()) == false) {
-                changed = true;
-            }
-        }
-
-        if (changed) {
-            assertNotEquals(firstSearchRequest, secondSearchRequest);
-            assertNotEquals(firstSearchRequest.hashCode(), secondSearchRequest.hashCode());
-        } else {
-            assertEquals(firstSearchRequest, secondSearchRequest);
-            assertEquals(firstSearchRequest.hashCode(), secondSearchRequest.hashCode());
-        }
+    private SearchRequest mutate(SearchRequest searchRequest) throws IOException {
+        SearchRequest mutation = copyRequest(searchRequest);
+        List<Runnable> mutators = new ArrayList<>();
+        mutators.add(() -> mutation.indices(ArrayUtils.concat(searchRequest.indices(), new String[] { randomAsciiOfLength(10) })));
+        mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(searchRequest.indicesOptions(),
+                () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()))));
+        mutators.add(() -> mutation.types(ArrayUtils.concat(searchRequest.types(), new String[] { randomAsciiOfLength(10) })));
+        mutators.add(() -> mutation.preference(randomValueOtherThan(searchRequest.preference(), () -> randomAsciiOfLengthBetween(3, 10))));
+        mutators.add(() -> mutation.routing(randomValueOtherThan(searchRequest.routing(), () -> randomAsciiOfLengthBetween(3, 10))));
+        mutators.add(() -> mutation.requestCache((randomValueOtherThan(searchRequest.requestCache(), () -> randomBoolean()))));
+        mutators.add(() -> mutation
+                .scroll(randomValueOtherThan(searchRequest.scroll(), () -> new Scroll(new TimeValue(randomPositiveLong() % 100000)))));
+        mutators.add(() -> mutation.searchType(randomValueOtherThan(searchRequest.searchType(), () -> randomFrom(SearchType.values()))));
+        mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder)));
+        randomFrom(mutators).run();
+        return mutation;
     }
 
     private static SearchRequest copyRequest(SearchRequest searchRequest) throws IOException {
diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
index 7de8f6a4988..717d64a53e7 100644
--- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -25,8 +25,10 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchTask;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import
org.elasticsearch.common.xcontent.XContentBuilder; @@ -41,6 +43,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -173,11 +176,12 @@ public class SearchServiceTests extends ESSingleNodeTestCase { try { QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - new SearchSourceBuilder(), new String[0], false)); + new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY)), + new SearchTask(123L, "", "", "", null)); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null /* not a scroll */); - service.executeFetchPhase(req); + service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null)); } catch (AlreadyClosedException ex) { throw ex; } catch (IllegalStateException ex) { diff --git a/core/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java b/core/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java index 168574059f6..6a6838a9c4f 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java +++ b/core/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java @@ -28,8 +28,7 @@ import org.elasticsearch.script.AbstractSearchScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.NativeScriptFactory; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptModule; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -42,8 +41,6 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDI import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; import static org.hamcrest.Matchers.equalTo; -/** - */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class SearchTimeoutIT extends ESIntegTestCase { @@ -61,7 +58,8 @@ public class SearchTimeoutIT extends ESIntegTestCase { client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS)) - .setQuery(scriptQuery(new Script(NativeTestScriptedTimeout.TEST_NATIVE_SCRIPT_TIMEOUT, ScriptType.INLINE, "native", null))) + .setQuery(scriptQuery( + new Script(ScriptType.INLINE, "native", NativeTestScriptedTimeout.TEST_NATIVE_SCRIPT_TIMEOUT, Collections.emptyMap()))) .execute().actionGet(); assertThat(searchResponse.isTimedOut(), equalTo(true)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationTestScriptsPlugin.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationTestScriptsPlugin.java index 2c320288edf..bc98dda41d6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationTestScriptsPlugin.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationTestScriptsPlugin.java @@ -28,7 +28,8 @@ import 
java.util.Map;
 import java.util.function.Function;
 
 import static java.util.Collections.singletonMap;
-import static org.elasticsearch.script.ScriptService.ScriptType;
+
+import org.elasticsearch.script.ScriptType;
 
 /**
  * This class contains various mocked scripts that are used in aggregations integration tests.
@@ -43,7 +44,7 @@ public class AggregationTestScriptsPlugin extends MockScriptPlugin {
     //      res[i] = values.get(i) - dec;
     //  };
     //  return res;
-    public static final Script DECREMENT_ALL_VALUES = new Script("decrement all values", ScriptType.INLINE, NAME, singletonMap("dec", 1));
+    public static final Script DECREMENT_ALL_VALUES = new Script(ScriptType.INLINE, NAME, "decrement all values", singletonMap("dec", 1));
 
     @Override
     protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
@@ -90,7 +91,7 @@ public class AggregationTestScriptsPlugin extends MockScriptPlugin {
             return doc.get("values");
         });
 
-        scripts.put(DECREMENT_ALL_VALUES.getScript(), vars -> {
+        scripts.put(DECREMENT_ALL_VALUES.getIdOrCode(), vars -> {
             int dec = (int) vars.get("dec");
             Map doc = (Map) vars.get("doc");
             ScriptDocValues.Longs values = (ScriptDocValues.Longs) doc.get("values");
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
index ca5b98af300..b20278895b0 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
@@ -31,11 +31,11 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.test.AbstractQueryTestCase;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.indices.IndicesModule;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.test.AbstractQueryTestCase;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
@@ -44,7 +44,7 @@ import java.util.Collections;
 import java.util.List;
 
 import static java.util.Collections.emptyList;
-import static org.hamcrest.Matchers.equalTo;
+import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
 
 public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuilder<AB>> extends ESTestCase {
 
@@ -72,6 +72,7 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuilder<AB>> extends ESTestCase {
@@ -73,6 +73,7 @@ public abstract class BasePipelineAggregationTestCase
         Map<String, Object> params = new HashMap<>();
         params.put("fieldname", "date");
         SearchResponse response = client().prepareSearch("idx")
-                .addAggregation(dateHistogram("histo").script(new Script(DateScriptMocks.ExtractFieldScript.NAME,
-                        ScriptType.INLINE, "native", params)).dateHistogramInterval(DateHistogramInterval.MONTH))
+                .addAggregation(dateHistogram("histo").script(new Script(ScriptType.INLINE, "native", DateScriptMocks.ExtractFieldScript.NAME,
+                        params)).dateHistogramInterval(DateHistogramInterval.MONTH))
                 .execute().actionGet();
 
         assertSearchResponse(response);
@@ -726,8 +722,8 @@ public class DateHistogramIT extends ESIntegTestCase {
         Map<String, Object> params = new HashMap<>();
         params.put("fieldname", "dates");
         SearchResponse response = client().prepareSearch("idx")
-                .addAggregation(dateHistogram("histo").script(new Script(DateScriptMocks.ExtractFieldScript.NAME,
-                        ScriptType.INLINE, "native", params)).dateHistogramInterval(DateHistogramInterval.MONTH))
+                .addAggregation(dateHistogram("histo").script(new Script(ScriptType.INLINE, "native", DateScriptMocks.ExtractFieldScript.NAME,
+                        params)).dateHistogramInterval(DateHistogramInterval.MONTH))
                 .execute().actionGet();
 
         assertSearchResponse(response);
@@ -1009,20 +1005,13 @@ public class DateHistogramIT extends ESIntegTestCase {
         DateMathParser parser = new DateMathParser(Joda.getStrictStandardDateFormatter());
 
-        final Callable<Long> callable = new Callable<Long>() {
-            @Override
-            public Long call() throws Exception {
-                return System.currentTimeMillis();
-            }
-        };
-
         // we pick a random timezone offset of +12/-12 hours and insert two documents
         // one at 00:00 in that time zone and one at 12:00
         List<IndexRequestBuilder> builders = new ArrayList<>();
         int timeZoneHourOffset = randomIntBetween(-12, 12);
         DateTimeZone timezone = DateTimeZone.forOffsetHours(timeZoneHourOffset);
-        DateTime timeZoneStartToday = new DateTime(parser.parse("now/d", callable, false, timezone), DateTimeZone.UTC);
-        DateTime timeZoneNoonToday = new DateTime(parser.parse("now/d+12h", callable, false, timezone), DateTimeZone.UTC);
+        DateTime timeZoneStartToday = new DateTime(parser.parse("now/d", System::currentTimeMillis, false, timezone), DateTimeZone.UTC);
+        DateTime timeZoneNoonToday = new DateTime(parser.parse("now/d+12h", System::currentTimeMillis, false, timezone), DateTimeZone.UTC);
         builders.add(indexDoc(index, timeZoneStartToday, 1));
         builders.add(indexDoc(index, timeZoneNoonToday, 2));
         indexRandom(true, builders);
@@ -1203,4 +1192,46 @@ public class DateHistogramIT extends ESIntegTestCase {
         assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L));
         assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L));
     }
+
+    /**
+     * Make sure that a request using a script does not get cached and a request
+     * not using a script does get cached.
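+     *
+     * Scripts are opaque to the shard request cache: the same request source can
+     * produce different results on every execution, so the only safe behavior,
+     * and the one asserted below, is to bypass the cache whenever the request
+     * contains a script.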
+     */
+    public void testDontCacheScripts() throws Exception {
+        assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=date")
+                .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+                .get());
+        indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("d", date(1, 1)),
+                client().prepareIndex("cache_test_idx", "type", "2").setSource("d", date(2, 1)));
+
+        // Make sure we are starting with a clear cache
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // Test that a request using a script does not get cached
+        Map<String, Object> params = new HashMap<>();
+        params.put("fieldname", "d");
+        SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateHistogram("histo").field("d")
+                .script(new Script(ScriptType.INLINE, "native", DateScriptMocks.PlusOneMonthScript.NAME, params))
+                .dateHistogramInterval(DateHistogramInterval.MONTH)).get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // To make sure that the cache is working test that a request not using
+        // a script is cached
+        r = client().prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.MONTH)).get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(1L));
+    }
 }
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java
index 9be3a83bbc9..dcc80f69903 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java
@@ -21,19 +21,15 @@ package org.elasticsearch.search.aggregations.bucket;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.transport.AssertingLocalTransport;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 import org.junit.After;
 import org.junit.Before;
 
 import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 
@@ -58,11 +54,6 @@ public class DateHistogramOffsetIT extends
ESIntegTestCase { return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(date); } - @Override - protected Collection> nodePlugins() { - return Collections.singleton(AssertingLocalTransport.TestPlugin.class); - } - @Before public void beforeEachTest() throws IOException { prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 1e250681004..71d97d3969e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -20,9 +20,10 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.DateScriptMocks.DateScriptsMockPlugin; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.range.Range; @@ -53,9 +54,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public class DateRangeIT extends ESIntegTestCase { @@ -121,7 +119,7 @@ public class DateRangeIT extends ESIntegTestCase { if (randomBoolean()) { rangeBuilder.field("date"); } else { - rangeBuilder.script(new Script(DateScriptMocks.ExtractFieldScript.NAME, ScriptType.INLINE, "native", params)); + rangeBuilder.script(new Script(ScriptType.INLINE, "native", DateScriptMocks.ExtractFieldScript.NAME, params)); } SearchResponse response = client() .prepareSearch("idx") @@ -543,7 +541,7 @@ public class DateRangeIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateRange("range") .field("dates") - .script(new Script(DateScriptMocks.PlusOneMonthScript.NAME, ScriptType.INLINE, "native", params)) + .script(new Script(ScriptType.INLINE, "native", DateScriptMocks.PlusOneMonthScript.NAME, params)) .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))).execute() .actionGet(); @@ -599,7 +597,7 @@ public class DateRangeIT extends ESIntegTestCase { params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateRange("range") - .script(new Script(DateScriptMocks.ExtractFieldScript.NAME, ScriptType.INLINE, "native", params)) + .script(new Script(ScriptType.INLINE, "native", DateScriptMocks.ExtractFieldScript.NAME, params)) .addUnboundedTo(date(2, 15)) .addRange(date(2, 15), date(3, 15)) .addUnboundedFrom(date(3, 15))) @@ -661,7 +659,7 @@ public class DateRangeIT extends ESIntegTestCase { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateRange("range").script(new Script(DateScriptMocks.ExtractFieldScript.NAME, ScriptType.INLINE, "native", params)) + dateRange("range").script(new Script(ScriptType.INLINE, "native", DateScriptMocks.ExtractFieldScript.NAME, params)) .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)) 
.addUnboundedFrom(date(3, 15))).execute().actionGet(); @@ -866,4 +864,51 @@ public class DateRangeIT extends ESIntegTestCase { assertThat(buckets.get(0).getDocCount(), equalTo(0L)); assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true)); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "date", "type=date") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, + client().prepareIndex("cache_test_idx", "type", "1") + .setSource(jsonBuilder().startObject().field("date", date(1, 1)).endObject()), + client().prepareIndex("cache_test_idx", "type", "2") + .setSource(jsonBuilder().startObject().field("date", date(2, 1)).endObject())); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + Map params = new HashMap<>(); + params.put("fieldname", "date"); + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") + .script(new Script(ScriptType.INLINE, "native", DateScriptMocks.PlusOneMonthScript.NAME, params)) + .addRange(new DateTime(2012, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC), new DateTime(2013, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") + .addRange(new DateTime(2012, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC), new DateTime(2013, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index ed43ba8bd63..1dc9943e8a3 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -21,11 +21,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; 
import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScoreAccessor; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.filter.Filter; @@ -56,7 +58,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; -import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; @@ -451,7 +452,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -504,7 +505,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -533,7 +534,8 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("(long) (_value / 1000 + 1)", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -575,8 +577,8 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms") .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "'].value", ScriptType.INLINE, - CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -602,8 +604,8 @@ public class DoubleTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms") .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']", ScriptType.INLINE, - CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -1077,10 +1079,11 @@ public class DoubleTermsIT extends AbstractTermsTestCase { } public void testScriptScore() { - Script scoringScript = - 
new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", ScriptType.INLINE, CustomScriptPlugin .NAME, null); + Script scoringScript = new Script( + ScriptType.INLINE, CustomScriptPlugin .NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); - Script aggregationScript = new Script("ceil(_score.doubleValue()/3)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script aggregationScript = new Script( + ScriptType.INLINE, CustomScriptPlugin.NAME, "ceil(_score.doubleValue()/3)", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -1179,4 +1182,43 @@ public class DoubleTermsIT extends AbstractTermsTestCase { public void testOtherDocCount() { testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=float") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1.5), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2.5)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( + terms("terms").field("d").script( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(terms("terms").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 288e9d4dcc5..32dcb1a4eb3 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -56,9 +56,6 @@ import static org.hamcrest.Matchers.sameInstance; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; -/** - * - */ 
@ESIntegTestCase.SuiteScopeTestCase public class GeoDistanceIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java index 9cebfeb9824..b226c6dbd2d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java @@ -38,9 +38,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; import static org.hamcrest.core.IsNull.notNullValue; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public class GlobalIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index e24f0d39d4b..e25aeae29ff 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -21,11 +21,12 @@ package org.elasticsearch.search.aggregations.bucket; import com.carrotsearch.hppc.LongHashSet; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -561,7 +562,7 @@ public class HistogramIT extends ESIntegTestCase { .addAggregation( histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) .interval(interval)) .execute().actionGet(); @@ -638,7 +639,7 @@ public class HistogramIT extends ESIntegTestCase { .addAggregation( histogram("histo") .field(MULTI_VALUED_FIELD_NAME) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) .interval(interval)) .execute().actionGet(); @@ -674,7 +675,7 @@ public class HistogramIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("idx") .addAggregation( histogram("histo") - .script(new Script("doc['l_value'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", emptyMap())) .interval(interval)) .execute().actionGet(); @@ -698,7 +699,7 @@ public class HistogramIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("idx") .addAggregation( histogram("histo") - .script(new Script("doc['l_values']", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) .interval(interval)) .execute().actionGet(); @@ -995,4 +996,43 @@ public class HistogramIT extends ESIntegTestCase { assertEquals(0.05, (double) 
buckets.get(1).getKey(), 0.01d); assertEquals(1, buckets.get(1).getDocCount()); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=float") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("d", -0.6), + client().prepareIndex("cache_test_idx", "type", "2").setSource("d", 0.1)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())).interval(0.7).offset(0.05)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java index d9f2ed7c365..cc4818963ad 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java @@ -37,8 +37,7 @@ import org.elasticsearch.script.AbstractSearchScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.NativeScriptFactory; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptModule; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.test.ESIntegTestCase; @@ -209,7 +208,7 @@ public class IpRangeIT extends ESIntegTestCase { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().prepareSearch("idx").addAggregation( AggregationBuilders.ipRange("my_range") - .script(new Script(DummyScript.NAME, ScriptType.INLINE, "native", Collections.emptyMap())) ).get()); + 
.script(new Script(ScriptType.INLINE, "native", DummyScript.NAME, Collections.emptyMap())) ).get()); assertThat(e.getMessage(), containsString("[ip_range] does not support scripts")); } @@ -218,7 +217,7 @@ public class IpRangeIT extends ESIntegTestCase { () -> client().prepareSearch("idx").addAggregation( AggregationBuilders.ipRange("my_range") .field("ip") - .script(new Script(DummyScript.NAME, ScriptType.INLINE, "native", Collections.emptyMap())) ).get()); + .script(new Script(ScriptType.INLINE, "native", DummyScript.NAME, Collections.emptyMap())) ).get()); assertThat(e.getMessage(), containsString("[ip_range] does not support scripts")); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 6d5e190b542..1739d09a054 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -21,10 +21,12 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.filter.Filter; @@ -53,7 +55,6 @@ import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; @@ -441,7 +442,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -494,7 +495,7 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("_value - 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -523,7 +524,8 @@ public class LongTermsIT extends AbstractTermsTestCase { .addAggregation(terms("terms") .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("floor(_value / 1000 + 1)", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script( + ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", 
Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -559,7 +561,8 @@ public class LongTermsIT extends AbstractTermsTestCase { */ public void testScriptSingleValue() throws Exception { - Script script = new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx").setTypes("type") .addAggregation(terms("terms") @@ -585,7 +588,8 @@ public class LongTermsIT extends AbstractTermsTestCase { } public void testScriptMultiValued() throws Exception { - Script script = new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx").setTypes("type") .addAggregation(terms("terms") @@ -1136,4 +1140,43 @@ public class LongTermsIT extends AbstractTermsTestCase { public void testOtherDocCount() { testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( + terms("terms").field("d").script( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(terms("terms").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 012df7bfbff..925ff86232a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -151,8 +151,8 @@ public class MinDocCountIT extends AbstractTermsTestCase { YES { @Override TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field) { - return builder.script(new org.elasticsearch.script.Script("doc['" + field + "'].values", ScriptService.ScriptType.INLINE, - CustomScriptPlugin.NAME, null)); + return builder.script(new org.elasticsearch.script.Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "doc['" + field + "'].values", Collections.emptyMap())); } }; abstract TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index d3e934d875f..fc4fac72ee7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -36,17 +38,18 @@ import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -391,7 +394,7 @@ public class RangeIT extends 
ESIntegTestCase { .addAggregation( range("range") .field(SINGLE_VALUED_FIELD_NAME) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) .addUnboundedTo(3) .addRange(3, 6) .addUnboundedFrom(6)) @@ -511,7 +514,7 @@ public class RangeIT extends ESIntegTestCase { .addAggregation( range("range") .field(MULTI_VALUED_FIELD_NAME) - .script(new Script("_value + 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) .addUnboundedTo(3) .addRange(3, 6) .addUnboundedFrom(6)) @@ -572,7 +575,8 @@ public class RangeIT extends ESIntegTestCase { */ public void testScriptSingleValue() throws Exception { - Script script = new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") .addAggregation( @@ -657,7 +661,8 @@ public class RangeIT extends ESIntegTestCase { } public void testScriptMultiValued() throws Exception { - Script script = new Script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "'].values", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -907,4 +912,47 @@ public class RangeIT extends ESIntegTestCase { assertThat(buckets.get(0).getDocCount(), equalTo(0L)); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. 
+ */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "i", "type=integer") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, + client().prepareIndex("cache_test_idx", "type", "1").setSource(jsonBuilder().startObject().field("i", 1).endObject()), + client().prepareIndex("cache_test_idx", "type", "2").setSource(jsonBuilder().startObject().field("i", 2).endObject())); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + Map<String, Object> params = new HashMap<>(); + params.put("fieldname", "date"); + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( + range("foo").field("i").script( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())).addRange(0, 10)) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(range("foo").field("i").addRange(0, 10)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java index 7284901e94c..3de40c16c28 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsIT.java @@ -58,9 +58,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public class SignificantTermsIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index fab1f8b7d3e..8852716377e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; @@ -31,7 +32,7 @@ import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.script.NativeScriptFactory; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; @@ -55,6 +56,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -74,9 +76,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; -/** - * - */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { @@ -509,14 +508,13 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { } private ScriptHeuristic getScriptSignificanceHeuristic() throws IOException { - Script script = null; + Script script; if (randomBoolean()) { - Map<String, Object> params = null; - params = new HashMap<>(); + Map<String, Object> params = new HashMap<>(); params.put("param", randomIntBetween(1, 100)); - script = new Script("native_significance_score_script_with_params", ScriptType.INLINE, "native", params); + script = new Script(ScriptType.INLINE, "native", "native_significance_score_script_with_params", params); } else { - script = new Script("native_significance_score_script_no_params", ScriptType.INLINE, "native", null); + script = new Script(ScriptType.INLINE, "native", "native_significance_score_script_no_params", Collections.emptyMap()); } return new ScriptHeuristic(script); } @@ -547,4 +545,43 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { SharedSignificantTermsTestMethods.aggregateAndCheckFromSeveralShards(this); } + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached.
+ */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + ScriptHeuristic scriptHeuristic = getScriptSignificanceHeuristic(); + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java index 8ccccf3f33a..3d5d13bf04a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java @@ -24,11 +24,13 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -61,7 +63,6 @@ import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static 
org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.count; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; @@ -574,7 +575,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("'foo_' + _value", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -601,7 +602,8 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("_value.substring(0,3)", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script( + ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -651,7 +653,8 @@ public class StringTermsIT extends AbstractTermsTestCase { .addAggregation( terms("terms") .executionHint(randomExecutionHint()) - .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap())) .collectMode(randomFrom(SubAggCollectionMode.values()))) .get(); @@ -683,7 +686,7 @@ public class StringTermsIT extends AbstractTermsTestCase { .executionHint(randomExecutionHint()) .field(MULTI_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script("'foo_' + _value", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))) .get(); assertSearchResponse(response); @@ -718,7 +721,8 @@ public class StringTermsIT extends AbstractTermsTestCase { */ public void testScriptSingleValue() throws Exception { - Script script = new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -746,7 +750,8 @@ public class StringTermsIT extends AbstractTermsTestCase { } public void testScriptSingleValueExplicitSingleValue() throws Exception { - Script script = new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -781,7 +786,8 @@ public class StringTermsIT extends AbstractTermsTestCase { terms("terms") .collectMode(randomFrom(SubAggCollectionMode.values())) .executionHint(randomExecutionHint()) - .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()))) .get(); assertSearchResponse(response); 
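For readers tracking the pattern across these hunks: every call site is being migrated to the new Script constructor, whose arguments are now ordered (ScriptType type, String lang, String idOrCode, Map<String, Object> params), with a non-null params map required (hence the many null-to-Collections.emptyMap() changes), and with ScriptType promoted from the nested ScriptService.ScriptType to the top-level org.elasticsearch.script.ScriptType. A minimal before/after sketch of the migration; the wrapper class and the "mockscript" engine name below are illustrative, not part of the change:

import java.util.Collections;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

class ScriptConstructorSketch {
    static Script inlineScript() {
        // Before this change: new Script("doc['f'].value", ScriptType.INLINE, "mockscript", null)
        // After: type and language lead, the source string follows, and params must be non-null.
        return new Script(ScriptType.INLINE, "mockscript", "doc['f'].value", Collections.emptyMap());
    }
}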
@@ -1511,4 +1517,45 @@ public class StringTermsIT extends AbstractTermsTestCase { public void testOtherDocCount() { testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=keyword") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", "foo"), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", "bar")); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + terms("terms").field("d").script( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(terms("terms").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 2ea319daa9c..764ed3ba73b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -39,9 +39,6 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.signific import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class TermsShardMinDocCountIT extends ESIntegTestCase { private static final String index = "someindex"; private static final String type = "testtype"; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java index 6b7e51cc90f..e2609f97eaa 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java @@ -19,22 +19,26 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPoolStats; import org.joda.time.DateTimeZone; import org.joda.time.Instant; @@ -92,8 +96,13 @@ public class ExtendedBoundsTests extends ESTestCase { public void testParseAndValidate() { long now = randomLong(); + Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build(); SearchContext context = mock(SearchContext.class); - when(context.nowInMillis()).thenReturn(now); + QueryShardContext qsc = new QueryShardContext(0, + new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), null, null, null, null, + null, null, null, null, null, () -> now); + when(context.getQueryShardContext()).thenReturn(qsc); FormatDateTimeFormatter formatter = Joda.forPattern("dateOptionalTime"); DocValueFormat format = new DocValueFormat.DateTime(formatter, DateTimeZone.UTC); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index a3bb846c3e1..d658a36c07d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -55,8 +55,6 @@ import java.util.List; import static org.hamcrest.Matchers.equalTo; -/** - */ public class NestedAggregatorTests extends ESSingleNodeTestCase { public void testResetRootDocId() throws Exception { Directory directory = newDirectory(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index 7cca4baadea..6a4eec89f23 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -46,9 +46,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public abstract class AbstractGeoTestCase extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index ccf03df37fc..66fd9653c4c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -56,14 +56,13 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public class AvgIT extends AbstractNumericTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -168,7 +167,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg").field("value") - .script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, null))) + .script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -185,7 +184,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg").field("value") - .script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, params))) + .script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -229,7 +228,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg").field("values") - .script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, null))) + .script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -246,7 +245,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg").field("values") - .script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, params))) + .script(new
Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -262,7 +261,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg") - .script(new Script("value", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, null))) + .script(new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "value", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -279,7 +278,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg") - .script(new Script("value", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, params))) + .script(new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "value", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -295,7 +294,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg") - .script(new Script("values", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, null))) + .script(new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "values", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -312,7 +311,7 @@ public class AvgIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation(avg("avg") - .script(new Script("values", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, params))) + .script(new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "values", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -353,6 +352,45 @@ public class AvgIT extends AbstractNumericTestCase { } } + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. 
+ */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(avg("foo").field("d").script( + new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", Collections.emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(avg("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + /** * Mock plugin for the {@link ExtractFieldScriptEngine} */ diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java index cff1fa746dc..17785d2cb32 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -41,10 +42,10 @@ import java.util.function.Function; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -305,7 +306,7 @@ public class CardinalityIT extends ESIntegTestCase { .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) - .script(new Script("doc['str_value'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", emptyMap()))) .execute().actionGet(); assertSearchResponse(response); @@ -321,7 +322,7 @@ public class CardinalityIT extends ESIntegTestCase { .addAggregation( cardinality("cardinality") .precisionThreshold(precisionThreshold) - .script(new Script("doc['str_values'].values", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_values'].values", emptyMap()))) .execute().actionGet(); assertSearchResponse(response); @@ -333,7 +334,7 @@ public class CardinalityIT extends ESIntegTestCase { } public void testSingleValuedNumericScript() throws Exception { - Script script = new Script("doc[' + singleNumericField() + '].value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + singleNumericField() + '].value", emptyMap()); SearchResponse response = client().prepareSearch("idx").setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) .execute().actionGet(); @@ -347,7 +348,8 @@ public class CardinalityIT extends ESIntegTestCase { } public void testMultiValuedNumericScript() throws Exception { - Script script = new Script("doc[' + multiNumericField(false) + '].values", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + multiNumericField(false) + '].values", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx").setTypes("type") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) .execute().actionGet(); @@ -366,7 +368,7 @@ public class CardinalityIT extends ESIntegTestCase { cardinality("cardinality") .precisionThreshold(precisionThreshold) .field("str_value") - .script(new Script("_value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) .execute().actionGet(); assertSearchResponse(response); @@ -383,7 +385,7 @@ public class CardinalityIT extends ESIntegTestCase { cardinality("cardinality") .precisionThreshold(precisionThreshold) .field("str_values") - .script(new Script("_value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) .execute().actionGet(); assertSearchResponse(response); @@ -400,7 +402,7 @@ public class CardinalityIT extends ESIntegTestCase { cardinality("cardinality") .precisionThreshold(precisionThreshold) .field(singleNumericField()) - .script(new Script("_value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) .execute().actionGet(); assertSearchResponse(response); @@ -417,7 +419,7 @@ public class CardinalityIT extends 
ESIntegTestCase { cardinality("cardinality") .precisionThreshold(precisionThreshold) .field(multiNumericField(false)) - .script(new Script("_value", ScriptType.INLINE, CustomScriptPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) .execute().actionGet(); assertSearchResponse(response); @@ -445,4 +447,44 @@ public class CardinalityIT extends ESIntegTestCase { assertCount(count, 2); } } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(cardinality("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 6f4f891ea65..6eb0bba6ae4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -19,9 +19,10 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import 
org.elasticsearch.search.aggregations.bucket.global.Global; @@ -46,7 +47,9 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -298,7 +301,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { .addAggregation( extendedStats("stats") .field("value") - .script(new Script("_value + 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) .sigma(sigma)) .execute().actionGet(); @@ -328,7 +332,7 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { .addAggregation( extendedStats("stats") .field("value") - .script(new Script("_value + inc", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) .sigma(sigma)) .execute().actionGet(); @@ -380,7 +384,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { .addAggregation( extendedStats("stats") .field("values") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) .sigma(sigma)) .execute().actionGet(); @@ -410,7 +415,7 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { .addAggregation( extendedStats("stats") .field("values") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .sigma(sigma)) .get(); @@ -437,7 +442,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { .setQuery(matchAllQuery()) .addAggregation( extendedStats("stats") - .script(new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap())) .sigma(sigma)) .execute().actionGet(); @@ -462,7 +468,7 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("inc", 1); - Script script = new Script("doc['value'].value + inc", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") @@ -496,7 +502,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { .setQuery(matchAllQuery()) .addAggregation( extendedStats("stats") - .script(new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, null)) + .script(new Script(ScriptType.INLINE, +
AggregationTestScriptsPlugin.NAME, "doc['values'].values", Collections.emptyMap())) .sigma(sigma)) .execute().actionGet(); @@ -521,8 +528,8 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("[ doc['value'].value, doc['value'].value - dec ]", ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "[ doc['value'].value, doc['value'].value - dec ]", + params); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") @@ -630,4 +637,44 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { assertThat(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), equalTo(stats.getAvg() - (stats.getStdDeviation() * sigma))); } + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(extendedStats("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 57bdc7d5dfc..29184285195 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -20,9 +20,10 @@ package
org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -49,7 +50,9 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -257,7 +260,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .field("value") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -281,7 +284,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .field("value") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .values(pcts)) .execute().actionGet(); @@ -321,7 +324,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .field("values") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -342,7 +345,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .field("values") - .script(new Script("20 - _value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -366,7 +369,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .field("values") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .values(pcts)) .execute().actionGet(); @@ -387,7 +390,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase 
{ percentileRanks("percentile_ranks") .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) - .script(new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -403,7 +406,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("doc['value'].value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = client() @@ -428,7 +431,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues, maxValues); - Script script = new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values'].values", emptyMap()); SearchResponse searchResponse = client() .prepareSearch("idx") @@ -530,4 +533,45 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { } } + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client() + .prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo").method(PercentilesMethod.HDR).field("d") + .values(50.0).script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentileRanks("foo").method(PercentilesMethod.HDR).field("d").values(50.0)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); +
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 8112551f53c..32fdd02a876 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -20,10 +20,11 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -49,7 +50,9 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -242,7 +245,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { .numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("value") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -267,7 +270,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { .numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("value") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .percentiles(pcts)) .execute().actionGet(); @@ -307,7 +310,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { .numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("values") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -328,7 +331,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { .numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("values") - .script(new Script("20 - _value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, 
emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -353,7 +356,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { .numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("values") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .percentiles(pcts)) .execute().actionGet(); @@ -374,7 +377,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { percentiles("percentiles") .numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) - .script(new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -389,7 +392,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("doc['value'].value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); @@ -415,7 +418,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - Script script = new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values'].values", emptyMap()); SearchResponse searchResponse = client() .prepareSearch("idx") @@ -521,4 +524,45 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { } } -} \ No newline at end of file + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached.
+ */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java index d4ac8835ee0..4a651a0ad55 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java @@ -19,9 +19,10 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -44,13 +45,12 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public class MaxIT extends AbstractNumericTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -165,7 +165,7 @@ public class MaxIT extends AbstractNumericTestCase { .addAggregation( max("max") .field("value") - .script(new Script("_value + 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -185,7 +185,7 @@ public class MaxIT extends AbstractNumericTestCase { .addAggregation( max("max") .field("value") - .script(new Script("_value + inc", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) .get(); assertHitCount(searchResponse, 10); @@ -218,7 +218,7 @@ public class MaxIT extends AbstractNumericTestCase { .addAggregation( max("max") .field("values") - .script(new Script("_value + 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", emptyMap()))) .get(); assertHitCount(searchResponse, 10); @@ -238,7 +238,7 @@ public class MaxIT extends AbstractNumericTestCase { .addAggregation( max("max") .field("values") - .script(new Script("_value + inc", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) .get(); assertHitCount(searchResponse, 10); @@ -255,7 +255,7 @@ public class MaxIT extends AbstractNumericTestCase { .setQuery(matchAllQuery()) .addAggregation( max("max") - .script(new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -271,7 +271,7 @@ public class MaxIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("inc", 1); - Script script = new Script("doc['value'].value + inc", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -292,7 +292,8 @@ public class MaxIT extends AbstractNumericTestCase { .setQuery(matchAllQuery()) .addAggregation( max("max") - .script(new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, null))) + .script(new Script(ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, "doc['values'].values", Collections.emptyMap()))) .get(); assertHitCount(searchResponse, 10); @@ -308,8 +309,8 @@ public class MaxIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("inc", 1); - Script script = new Script("[ doc['value'].value, doc['value'].value + inc ]", ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "[ doc['value'].value, doc['value'].value + inc ]", + params); SearchResponse
searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(max("max").script(script)) @@ -352,4 +353,43 @@ public class MaxIT extends AbstractNumericTestCase { } } -} \ No newline at end of file + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( + max("foo").field("d").script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(max("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java index 56c12fbc77f..dde1c840ee6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java @@ -19,9 +19,10 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -44,13 +45,12 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.min; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public class MinIT extends AbstractNumericTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -166,7 +166,7 @@ public class MinIT extends AbstractNumericTestCase { .addAggregation( min("min") .field("value") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) .get(); assertHitCount(searchResponse, 10); @@ -182,7 +182,7 @@ public class MinIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -219,7 +219,7 @@ public class MinIT extends AbstractNumericTestCase { .addAggregation( min("min") .field("values") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) .get(); assertHitCount(searchResponse, 10); @@ -237,7 +237,7 @@ public class MinIT extends AbstractNumericTestCase { .addAggregation( min("min") .field("values") - .script(new Script("_value * -1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()))) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap()))) .get(); assertHitCount(searchResponse, 10); @@ -253,7 +253,7 @@ public class MinIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params); SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(min("min").field("values").script(script)) @@ -269,7 +269,7 @@ public class MinIT extends AbstractNumericTestCase { @Override public void testScriptSingleValued() throws Exception { - Script script = new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(min("min").script(script)) @@ -288,7 +288,7 @@ public class MinIT extends AbstractNumericTestCase { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("doc['value'].value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); SearchResponse searchResponse =
client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(min("min").script(script)) @@ -304,7 +304,7 @@ public class MinIT extends AbstractNumericTestCase { @Override public void testScriptMultiValued() throws Exception { - Script script = new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values'].values", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(min("min").script(script)) .get(); @@ -365,4 +365,43 @@ public class MinIT extends AbstractNumericTestCase { } } -} \ No newline at end of file + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( + min("foo").field("d").script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(min("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index e1800b2f9f1..545c10bcb03 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -28,7 +28,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import 
org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -277,7 +277,7 @@ public class ScriptedMetricIT extends ESIntegTestCase { } public void testMap() { - Script mapScript = new Script("_agg['count'] = 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -317,7 +317,7 @@ public class ScriptedMetricIT extends ESIntegTestCase { Map params = new HashMap<>(); params.put("_agg", new ArrayList<>()); - Script mapScript = new Script("_agg.add(1)", ScriptType.INLINE, CustomScriptPlugin.NAME, params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -365,8 +365,10 @@ public class ScriptedMetricIT extends ESIntegTestCase { .addAggregation( scriptedMetric("scripted") .params(params) - .initScript(new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .mapScript(new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .initScript( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) + .mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "_agg.add(vars.multiplier)", Collections.emptyMap()))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -404,8 +406,9 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script("_agg.add(1)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -455,9 +458,10 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script initScript = new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -508,10 +512,12 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script initScript = 
new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -551,10 +557,12 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script initScript = new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client() .prepareSearch("idx") @@ -605,9 +613,11 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -645,9 +655,10 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script initScript = new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, 
null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -684,8 +695,9 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -725,11 +737,12 @@ public class ScriptedMetricIT extends ESIntegTestCase { Map reduceParams = new HashMap<>(); reduceParams.put("multiplier", 4); - Script initScript = new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("multiplied sum aggs of agg values as a new aggregation", ScriptType.INLINE, - CustomScriptPlugin.NAME, reduceParams); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "multiplied sum aggs of agg values as a new aggregation", reduceParams); SearchResponse response = client() .prepareSearch("idx") @@ -774,10 +787,14 @@ public class ScriptedMetricIT extends ESIntegTestCase { .addAggregation( scriptedMetric("scripted") .params(params) - .initScript(new Script("initScript_stored", ScriptType.STORED, CustomScriptPlugin.NAME, null)) - .mapScript(new Script("mapScript_stored", ScriptType.STORED, CustomScriptPlugin.NAME, null)) - .combineScript(new Script("combineScript_stored", ScriptType.STORED, CustomScriptPlugin.NAME, null)) - .reduceScript(new Script("reduceScript_stored", ScriptType.STORED, CustomScriptPlugin.NAME, null))) + .initScript( + new Script(ScriptType.STORED, CustomScriptPlugin.NAME, "initScript_stored", Collections.emptyMap())) + .mapScript( + new Script(ScriptType.STORED, CustomScriptPlugin.NAME, "mapScript_stored", Collections.emptyMap())) + .combineScript( + new Script(ScriptType.STORED, CustomScriptPlugin.NAME, 
"combineScript_stored", Collections.emptyMap())) + .reduceScript( + new Script(ScriptType.STORED, CustomScriptPlugin.NAME, "reduceScript_stored", Collections.emptyMap()))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -810,10 +827,12 @@ public class ScriptedMetricIT extends ESIntegTestCase { .addAggregation( scriptedMetric("scripted") .params(params) - .initScript(new Script("init_script", ScriptType.FILE, CustomScriptPlugin.NAME, null)) - .mapScript(new Script("map_script", ScriptType.FILE, CustomScriptPlugin.NAME, null)) - .combineScript(new Script("combine_script", ScriptType.FILE, CustomScriptPlugin.NAME, null)) - .reduceScript(new Script("reduce_script", ScriptType.FILE, CustomScriptPlugin.NAME, null))) + .initScript(new Script(ScriptType.FILE, CustomScriptPlugin.NAME, "init_script", Collections.emptyMap())) + .mapScript(new Script(ScriptType.FILE, CustomScriptPlugin.NAME, "map_script", Collections.emptyMap())) + .combineScript( + new Script(ScriptType.FILE, CustomScriptPlugin.NAME, "combine_script", Collections.emptyMap())) + .reduceScript( + new Script(ScriptType.FILE, CustomScriptPlugin.NAME, "reduce_script", Collections.emptyMap()))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -841,10 +860,12 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script initScript = new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -900,10 +921,12 @@ public class ScriptedMetricIT extends ESIntegTestCase { params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script initScript = new Script("vars.multiplier = 3", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script mapScript = new Script("_agg.add(vars.multiplier)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script combineScript = new Script("sum agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script reduceScript = new Script("sum aggs of agg values as a new aggregation", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script combineScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new 
aggregation", Collections.emptyMap()); + Script reduceScript = + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) @@ -933,4 +956,33 @@ public class ScriptedMetricIT extends ESIntegTestCase { assertThat(aggregationResult.size(), equalTo(1)); assertThat(aggregationResult.get(0), equalTo(0)); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java index e4f96fae762..11c6ed7f6a2 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java @@ -20,10 +20,11 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -54,7 +55,8 @@ public class ScriptedMetricTests extends BaseAggregationTestCase params = new HashMap<>(); params.put("inc", 1); - Script script = new Script("doc['value'].value + inc", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -391,7 +394,7 @@ public class StatsIT extends AbstractNumericTestCase { @Override public void 
testScriptMultiValued() throws Exception { - Script script = new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values'].values", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -417,8 +420,8 @@ public class StatsIT extends AbstractNumericTestCase { Map params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("[ doc['value'].value, doc['value'].value - dec ]", ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "[ doc['value'].value, doc['value'].value - dec ]", + params); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -485,4 +488,43 @@ public class StatsIT extends AbstractNumericTestCase { } assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards())); } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( + stats("foo").field("d").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(stats("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index 186bc8fb27b..61c80d648a6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -44,6 +44,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -55,13 +56,12 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public class SumIT extends AbstractNumericTestCase { @Override @@ -176,7 +176,8 @@ public class SumIT extends AbstractNumericTestCase { public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("value").script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, null))) + .addAggregation(sum("sum").field("value").script( + new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -193,7 +194,7 @@ public class SumIT extends AbstractNumericTestCase { params.put("increment", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("value").script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, params))) + .addAggregation(sum("sum").field("value").script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -208,7 +209,8 @@ public class SumIT extends AbstractNumericTestCase { public void testScriptSingleValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script(new Script("value", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, null))) + .addAggregation(sum("sum").script( + new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "value", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -225,7 +227,7 @@ public class SumIT extends AbstractNumericTestCase { params.put("inc", 1); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script(new Script("value", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, params))) + .addAggregation(sum("sum").script(new Script(ScriptType.INLINE, 
ExtractFieldScriptEngine.NAME, "value", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -240,7 +242,8 @@ public class SumIT extends AbstractNumericTestCase { public void testScriptMultiValued() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").script(new Script("values", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, null))) + .addAggregation(sum("sum").script( + new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "values", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -258,7 +261,7 @@ public class SumIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - sum("sum").script(new Script("values", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, params))) + sum("sum").script(new Script(ScriptType.INLINE, ExtractFieldScriptEngine.NAME, "values", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -290,7 +293,8 @@ public class SumIT extends AbstractNumericTestCase { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("values").script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, null))) + .addAggregation(sum("sum").field("values").script( + new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", Collections.emptyMap()))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -306,7 +310,7 @@ public class SumIT extends AbstractNumericTestCase { Map params = new HashMap<>(); params.put("increment", 1); SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("values").script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, params))) + .addAggregation(sum("sum").field("values").script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", params))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -347,6 +351,45 @@ public class SumIT extends AbstractNumericTestCase { } } + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. 
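+ * Here the scripted request goes through the FieldValueScriptEngine with an empty script source, which is still enough to make the request uncacheable.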
+ */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(sum("foo").field("d").script( + new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", Collections.emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(sum("foo").field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + /** * Mock plugin for the {@link ExtractFieldScriptEngine} */ diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index e50b89d8b96..67ac4855026 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -20,12 +20,11 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -43,8 +42,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; - import static java.util.Collections.emptyMap; import static 
org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -53,7 +50,9 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -242,7 +241,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { randomCompression( percentileRanks("percentile_ranks")) .field("value") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -263,7 +262,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { randomCompression( percentileRanks("percentile_ranks")) .field("value") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .values(pcts)) .execute().actionGet(); @@ -298,7 +297,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { randomCompression( percentileRanks("percentile_ranks")) .field("values") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -316,7 +315,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { randomCompression( percentileRanks("percentile_ranks")) .field("values") - .script(new Script("_value * -1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -337,7 +336,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { randomCompression( percentileRanks("percentile_ranks")) .field("values") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .values(pcts)) .execute().actionGet(); @@ -355,7 +354,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { .addAggregation( randomCompression( percentileRanks("percentile_ranks")) - .script(new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) .values(pcts)) .execute().actionGet(); @@ -370,7 +369,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { Map params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("doc['value'].value - dec", 
ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = client().prepareSearch("idx") @@ -391,7 +390,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { @Override public void testScriptMultiValued() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); - Script script = new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values'].values", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( @@ -484,4 +483,42 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { } } + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo").field("d").values(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo").field("d").values(50.0)).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index b0268351954..dbc7993c512 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -20,10 +20,11 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -50,7 +51,9 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -223,7 +226,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { randomCompression( percentiles("percentiles")) .field("value") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -244,7 +247,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { randomCompression( percentiles("percentiles")) .field("value") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .percentiles(pcts)) .execute().actionGet(); @@ -277,7 +280,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { randomCompression( percentiles("percentiles")) .field("values") - .script(new Script("_value - 1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -295,7 +298,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { randomCompression( percentiles("percentiles")) .field("values") - .script(new Script("_value * -1", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap())) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) .percentiles(pcts)) .execute().actionGet(); @@ -316,7 +319,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { randomCompression( percentiles("percentiles")) .field("values") - .script(new Script("_value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params)) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) .percentiles(pcts)) .execute().actionGet(); @@ -328,7 +331,7 @@ 
public class TDigestPercentilesIT extends AbstractNumericTestCase { @Override public void testScriptSingleValued() throws Exception { - Script script = new Script("doc['value'].value", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -350,7 +353,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { Map params = new HashMap<>(); params.put("dec", 1); - Script script = new Script("doc['value'].value - dec", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") @@ -371,7 +374,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { @Override public void testScriptMultiValued() throws Exception { final double[] pcts = randomPercentiles(); - Script script = new Script("doc['values'].values", ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, emptyMap()); + Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values'].values", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -465,4 +468,43 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { } } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get()); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d") + .percentiles(50.0).script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) + .get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d").percentiles(50.0)).get(); + assertSearchResponse(r); + + 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(1L));
+    }
 }
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
index 45d44c863a4..4bc640dc900 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
@@ -25,14 +25,16 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptEngine;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHitField;
 import org.elasticsearch.search.SearchHits;
@@ -46,6 +48,8 @@ import org.elasticsearch.search.aggregations.metrics.max.Max;
 import org.elasticsearch.search.aggregations.metrics.tophits.TopHits;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
+import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType;
+import org.elasticsearch.search.rescore.RescoreBuilder;
 import org.elasticsearch.search.sort.SortBuilders;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -84,9 +88,6 @@ import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.Matchers.sameInstance;
 
-/**
- *
- */
 @ESIntegTestCase.SuiteScopeTestCase()
 public class TopHitsIT extends ESIntegTestCase {
 
@@ -582,7 +583,7 @@ public class TopHitsIT extends ESIntegTestCase {
                                 .explain(true)
                                 .storedField("text")
                                 .fieldDataField("field1")
-                                .scriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()))
+                                .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap()))
                                 .fetchSource("text", null)
                                 .version(true)
                         )
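The `TopHitsIT` hunks around this point migrate script fields attached to `top_hits` sub-aggregations to the new constructor. A condensed sketch of the request shape being exercised (the `"5"` source and the mock engine come from the test; the aggregation name is arbitrary):

```java
import java.util.Collections;

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;

class TopHitsScriptFieldSketch {
    static TopHitsAggregationBuilder topCommentsWithScriptField(String mockLang) {
        // Each returned hit carries a "script" field computed per document;
        // the mock engine in these tests simply evaluates the constant source "5".
        return AggregationBuilders.topHits("top-comments")
                .size(1)
                .scriptField("script",
                        new Script(ScriptType.INLINE, mockLang, "5", Collections.emptyMap()));
    }
}
```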
@@ -864,7 +865,7 @@ public class TopHitsIT extends ESIntegTestCase {
                 nested("to-comments", "comments").subAggregation(
                         topHits("top-comments").size(1).highlighter(new HighlightBuilder().field(hlField)).explain(true)
                                 .fieldDataField("comments.user")
-                                .scriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap())).fetchSource("comments.message", null)
+                                .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())).fetchSource("comments.message", null)
                                 .version(true).sort("comments.date", SortOrder.ASC))).get();
         assertHitCount(searchResponse, 2);
 
         Nested nested = searchResponse.getAggregations().get("to-comments");
@@ -993,4 +994,152 @@ public class TopHitsIT extends ESIntegTestCase {
             }
         }
     }
+
+    /**
+     * Make sure that a request using a script does not get cached and a request
+     * not using a script does get cached.
+     */
+    public void testDontCacheScripts() throws Exception {
+        assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long")
+                .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+                .get());
+        indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1),
+                client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2));
+
+        // Make sure we are starting with a clear cache
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // Test that a request using a script field does not get cached
+        SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(topHits("foo").scriptField("bar",
+                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()))).get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // Test that a request using a script sort does not get cached
+        r = client().prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(topHits("foo").sort(
+                        SortBuilders.scriptSort(
+                                new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), ScriptSortType.STRING)))
+                .get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // To make sure that the cache is working test that a request not using
+        // a script is cached
+        r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")).get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(1L));
+    }
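The `testDontCacheScripts` methods added throughout this commit all follow one recipe: create a single-shard index with the request cache enabled, issue a scripted request and assert that the cache recorded neither a hit nor a miss (scripted requests are not cacheable, since a script's output is opaque to the cache key), then issue an equivalent script-free request and assert exactly one miss, proving the cache itself is working. A sketch of a helper that captures the assertion pair each test repeats verbatim (this helper is illustrative, not part of the diff, and assumes an `ESIntegTestCase` subclass):

```java
import org.elasticsearch.index.cache.request.RequestCacheStats;

// Inside an ESIntegTestCase subclass:
private void assertCacheState(String index, long expectedHits, long expectedMisses) {
    // Mirrors the paired assertThat(...getHitCount()...) / assertThat(...getMissCount()...)
    // calls that each testDontCacheScripts method inlines after every request.
    RequestCacheStats stats = client().admin().indices().prepareStats(index)
            .setRequestCache(true).get().getTotal().getRequestCache();
    assertEquals(expectedHits, stats.getHitCount());
    assertEquals(expectedMisses, stats.getMissCount());
}
```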
+
+    public void testWithRescore() {
+        // Rescore with default sort on relevancy (score)
+        {
+            SearchResponse response = client()
+                    .prepareSearch("idx")
+                    .addRescorer(
+                            RescoreBuilder.queryRescorer(new MatchAllQueryBuilder().boost(3.0f))
+                    )
+                    .setTypes("type")
+                    .addAggregation(terms("terms")
+                            .field(TERMS_AGGS_FIELD)
+                            .subAggregation(
+                                    topHits("hits")
+                            )
+                    )
+                    .get();
+            Terms terms = response.getAggregations().get("terms");
+            for (Terms.Bucket bucket : terms.getBuckets()) {
+                TopHits topHits = bucket.getAggregations().get("hits");
+                for (SearchHit hit : topHits.getHits().getHits()) {
+                    assertThat(hit.score(), equalTo(4.0f));
+                }
+            }
+        }
+
+        {
+            SearchResponse response = client()
+                    .prepareSearch("idx")
+                    .addRescorer(
+                            RescoreBuilder.queryRescorer(new MatchAllQueryBuilder().boost(3.0f))
+                    )
+                    .setTypes("type")
+                    .addAggregation(terms("terms")
+                            .field(TERMS_AGGS_FIELD)
+                            .subAggregation(
+                                    topHits("hits").sort(SortBuilders.scoreSort())
+                            )
+                    )
+                    .get();
+            Terms terms = response.getAggregations().get("terms");
+            for (Terms.Bucket bucket : terms.getBuckets()) {
+                TopHits topHits = bucket.getAggregations().get("hits");
+                for (SearchHit hit : topHits.getHits().getHits()) {
+                    assertThat(hit.score(), equalTo(4.0f));
+                }
+            }
+        }
+
+        // Rescore should not be applied if the sort order is not relevancy
+        {
+            SearchResponse response = client()
+                    .prepareSearch("idx")
+                    .addRescorer(
+                            RescoreBuilder.queryRescorer(new MatchAllQueryBuilder().boost(3.0f))
+                    )
+                    .setTypes("type")
+                    .addAggregation(terms("terms")
+                            .field(TERMS_AGGS_FIELD)
+                            .subAggregation(
+                                    topHits("hits").sort(SortBuilders.fieldSort("_type"))
+                            )
+                    )
+                    .get();
+            Terms terms = response.getAggregations().get("terms");
+            for (Terms.Bucket bucket : terms.getBuckets()) {
+                TopHits topHits = bucket.getAggregations().get("hits");
+                for (SearchHit hit : topHits.getHits().getHits()) {
+                    assertThat(hit.score(), equalTo(Float.NaN));
+                }
+            }
+        }
+
+        {
+            SearchResponse response = client()
+                    .prepareSearch("idx")
+                    .addRescorer(
+                            RescoreBuilder.queryRescorer(new MatchAllQueryBuilder().boost(3.0f))
+                    )
+                    .setTypes("type")
+                    .addAggregation(terms("terms")
+                            .field(TERMS_AGGS_FIELD)
+                            .subAggregation(
+                                    topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_type"))
+                            )
+                    )
+                    .get();
+            Terms terms = response.getAggregations().get("terms");
+            for (Terms.Bucket bucket : terms.getBuckets()) {
+                TopHits topHits = bucket.getAggregations().get("hits");
+                for (SearchHit hit : topHits.getHits().getHits()) {
+                    assertThat(hit.score(), equalTo(Float.NaN));
+                }
+            }
+        }
+    }
 }
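`testWithRescore` above pins down how rescoring interacts with `top_hits`: the query rescorer is applied to the hits the aggregation returns when they are sorted by relevance (the default, or an explicit score sort), and is skipped once any field sort is involved, in which case the tracked score is `Float.NaN`. With a match_all base query (score 1.0) and the default `total` score mode, the boosted match_all rescorer adds 3.0, which is why the test expects 4.0. A sketch of the rescorer the test builds:

```java
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;

class RescoreSketch {
    static QueryRescorerBuilder boostedMatchAllRescorer() {
        // Default score mode sums the original query score (1.0 for match_all)
        // and the rescore query score (3.0 here), giving 4.0 per rescored hit.
        return RescoreBuilder.queryRescorer(new MatchAllQueryBuilder().boost(3.0f));
    }
}
```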
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java
index 3f2b4c44620..98e28339036 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java
@@ -107,26 +107,27 @@ public class TopHitsTests extends BaseAggregationTestCase params = Collections.singletonMap("s", "value");
         SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
-                .addAggregation(count("count").script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, params))).execute().actionGet();
+                .addAggregation(count("count").script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", params))).execute().actionGet();
 
         assertHitCount(searchResponse, 10);
@@ -199,7 +195,7 @@ public class ValueCountIT extends ESIntegTestCase {
     public void testMultiValuedScriptWithParams() throws Exception {
         Map<String, Object> params = Collections.singletonMap("s", "values");
         SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
-                .addAggregation(count("count").script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, params))).execute().actionGet();
+                .addAggregation(count("count").script(new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "", params))).execute().actionGet();
 
         assertHitCount(searchResponse, 10);
@@ -209,6 +205,46 @@ public class ValueCountIT extends ESIntegTestCase {
         assertThat(valueCount.getValue(), equalTo(20L));
     }
 
+    /**
+     * Make sure that a request using a script does not get cached and a request
+     * not using a script does get cached.
+     */
+    public void testDontCacheScripts() throws Exception {
+        assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long")
+                .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+                .get());
+        indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1),
+                client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2));
+
+        // Make sure we are starting with a clear cache
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // Test that a request using a script does not get cached
+        SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(count("foo").field("d").script(
+                        new Script(ScriptType.INLINE, FieldValueScriptEngine.NAME, "value", Collections.emptyMap())))
+                .get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(0L));
+
+        // To make sure that the cache is working test that a request not using
+        // a script is cached
+        r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(count("foo").field("d")).get();
+        assertSearchResponse(r);
+
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getHitCount(), equalTo(0L));
+        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+                .getMissCount(), equalTo(1L));
+    }
+
     /**
      * Mock plugin for the {@link FieldValueScriptEngine}
      */
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
index 101e52fcadb..e76b02a8c95 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
@@ -26,13 +26,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
 import org.elasticsearch.search.aggregations.bucket.range.Range;
 import org.elasticsearch.search.aggregations.metrics.sum.Sum;
 import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.joda.time.DateTime;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -165,8 +164,9 @@ public class BucketScriptIT extends ESIntegTestCase {
                         .subAggregation(sum("field4Sum").field(FIELD_4_NAME))
.subAggregation( bucketScript("seriesArithmetic", - new Script("_value0 + _value1 + _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null) - , "field2Sum", "field3Sum", "field4Sum"))) + new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), + "field2Sum", "field3Sum", "field4Sum"))) .execute().actionGet(); assertSearchResponse(response); @@ -211,7 +211,8 @@ public class BucketScriptIT extends ESIntegTestCase { .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) .subAggregation( bucketScript("seriesArithmetic", - new Script("_value0 + _value1 / _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null), + new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "_value0 + _value1 / _value2", Collections.emptyMap()), "field2Sum", "field3Sum", "field4Sum"))) .execute().actionGet(); @@ -257,7 +258,7 @@ public class BucketScriptIT extends ESIntegTestCase { .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) .subAggregation( bucketScript("seriesArithmetic", - new Script("_value0 + _value1 + _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null) + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()) , "field2Sum", "field3Sum", "field4Sum"))) .execute().actionGet(); @@ -301,7 +302,7 @@ public class BucketScriptIT extends ESIntegTestCase { .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) .subAggregation( bucketScript("seriesArithmetic", - new Script("_value0", ScriptType.INLINE, CustomScriptPlugin.NAME, null), + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0", Collections.emptyMap()), "field2Sum"))) .execute().actionGet(); @@ -345,7 +346,8 @@ public class BucketScriptIT extends ESIntegTestCase { .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) .subAggregation( bucketScript("seriesArithmetic", bucketsPathsMap, - new Script("foo + bar + baz", ScriptType.INLINE, CustomScriptPlugin.NAME, null)))) + new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "foo + bar + baz", Collections.emptyMap())))) .execute().actionGet(); assertSearchResponse(response); @@ -382,7 +384,7 @@ public class BucketScriptIT extends ESIntegTestCase { Map params = new HashMap<>(); params.put("factor", 3); - Script script = new Script("(_value0 + _value1 + _value2) * factor", ScriptType.INLINE, CustomScriptPlugin.NAME, params); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(_value0 + _value1 + _value2) * factor", params); SearchResponse response = client() .prepareSearch("idx") @@ -438,7 +440,8 @@ public class BucketScriptIT extends ESIntegTestCase { .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) .subAggregation( bucketScript("seriesArithmetic", - new Script("_value0 + _value1 + _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null), + new Script(ScriptType.INLINE, + CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), "field2Sum", "field3Sum", "field4Sum").gapPolicy(GapPolicy.INSERT_ZEROS))) .execute().actionGet(); @@ -492,7 +495,7 @@ public class BucketScriptIT extends ESIntegTestCase { .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) .subAggregation( bucketScript("seriesArithmetic", - new Script("my_script", ScriptType.STORED, CustomScriptPlugin.NAME, null), + new Script(ScriptType.STORED, CustomScriptPlugin.NAME, "my_script", Collections.emptyMap()), "field2Sum", "field3Sum", "field4Sum"))).execute().actionGet(); assertSearchResponse(response); @@ -537,7 +540,8 @@ public class BucketScriptIT 
extends ESIntegTestCase {
                         .subAggregation(sum("field4Sum").field(FIELD_4_NAME))
                         .subAggregation(
                                 bucketScript("seriesArithmetic",
-                                        new Script("_value0 + _value1 + _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null),
+                                        new Script(ScriptType.INLINE,
+                                                CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()),
                                         "field2Sum", "field3Sum", "field4Sum")))
                 .execute().actionGet();
 
@@ -561,7 +565,8 @@ public class BucketScriptIT extends ESIntegTestCase {
                         .subAggregation(sum("field4Sum").field(FIELD_4_NAME))
                         .subAggregation(
                                 bucketScript("seriesArithmetic",
-                                        new Script("_value0 + _value1 + _value2", ScriptType.INLINE, CustomScriptPlugin.NAME, null),
+                                        new Script(ScriptType.INLINE,
+                                                CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()),
                                         "field2Sum", "field3Sum", "field4Sum"))).execute().actionGet();
 
         assertSearchResponse(response);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java
index 252c1247a49..d485a0b6d87 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptTests.java
@@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.pipeline;
 
 import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase;
 import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
 import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder;
@@ -42,12 +42,11 @@ public class BucketScriptTests extends BasePipelineAggregationTestCase<BucketScriptPipelineAggregationBuilder> {
-            Map<String, Object> params = null;
+            Map<String, Object> params = new HashMap<>();
             if (randomBoolean()) {
-                params = new HashMap<String, Object>();
                 params.put("foo", "bar");
             }
-            script = new Script("script", randomFrom(ScriptType.values()), randomFrom("my_lang", null), params);
+            script = new Script(randomFrom(ScriptType.values()), randomFrom("my_lang", Script.DEFAULT_SCRIPT_LANG), "script", params);
         }
         BucketScriptPipelineAggregationBuilder factory = new BucketScriptPipelineAggregationBuilder(name, bucketsPaths, script);
         if (randomBoolean()) {
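The `bucket_script` hunks above follow the same constructor migration, and additionally stop passing a null language in the random test builder, defaulting to `Script.DEFAULT_SCRIPT_LANG` instead. For orientation, a sketch of the `seriesArithmetic` pipeline aggregation these tests build (the language argument would be the test's mock plugin name; the buckets paths refer to sibling sum aggregations):

```java
import java.util.Collections;

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder;

class BucketScriptSketch {
    static BucketScriptPipelineAggregationBuilder seriesArithmetic(String mockLang) {
        // _value0.._value2 are bound, in order, to the buckets-path values
        // "field2Sum", "field3Sum" and "field4Sum" within each bucket.
        return PipelineAggregatorBuilders.bucketScript("seriesArithmetic",
                new Script(ScriptType.INLINE, mockLang, "_value0 + _value1 + _value2",
                        Collections.emptyMap()),
                "field2Sum", "field3Sum", "field4Sum");
    }
}
```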
Script("Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .addAggregation(histogram("histo").field(FIELD_1_NAME).interval(interval) @@ -206,7 +205,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScriptNoBucketsPruned() { - Script script = new Script("Double.isNaN(_value0) ? true : (_value0 < 10000)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? true : (_value0 < 10000)", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -239,7 +239,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScriptNoBucketsLeft() { - Script script = new Script("Double.isNaN(_value0) ? false : (_value0 > 10000)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 > 10000)", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -262,7 +263,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScript2() { - Script script = new Script("Double.isNaN(_value0) ? false : (_value0 < _value1)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 < _value1)", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -295,7 +297,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScriptSingleVariable() { - Script script = new Script("Double.isNaN(_value0) ? false : (_value0 > 100)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 > 100)", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -324,8 +327,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScriptNamedVars() { - Script script = new Script("Double.isNaN(my_value1) ? false : (my_value1 + my_value2 > 100)", ScriptType.INLINE, - CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(my_value1) ? false : (my_value1 + my_value2 > 100)", Collections.emptyMap()); Map bucketPathsMap = new HashMap<>(); bucketPathsMap.put("my_value1", "field2Sum"); @@ -361,8 +364,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScriptWithParams() { - Script script = new Script("Double.isNaN(_value0) ? false : (_value0 + _value1 > threshold)", ScriptType.INLINE, - CustomScriptPlugin.NAME, Collections.singletonMap("threshold", 100)); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? 
false : (_value0 + _value1 > threshold)", Collections.singletonMap("threshold", 100)); SearchResponse response = client().prepareSearch("idx") .addAggregation( @@ -394,7 +397,7 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testInlineScriptInsertZeros() { - Script script = new Script("_value0 + _value1 > 100", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 > 100", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .addAggregation( @@ -433,7 +436,7 @@ public class BucketSelectorIT extends ESIntegTestCase { // Source is not interpreted but my_script is defined in CustomScriptPlugin .setSource(new BytesArray("{ \"script\": \"Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)\" }"))); - Script script = new Script("my_script", ScriptType.STORED, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.STORED, CustomScriptPlugin.NAME, "my_script", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -466,8 +469,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testUnmapped() throws Exception { - Script script = new Script("Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", ScriptType.INLINE, - CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx_unmapped") .addAggregation( @@ -488,8 +491,8 @@ public class BucketSelectorIT extends ESIntegTestCase { } public void testPartiallyUnmapped() throws Exception { - Script script = new Script("Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", ScriptType.INLINE, - CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? 
false : (_value0 + _value1 > 100)", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation( diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java index 5ffbda13f9c..563894906ed 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.BasePipelineAggregationTestCase; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregationBuilder; @@ -42,12 +42,11 @@ public class BucketSelectorTests extends BasePipelineAggregationTestCase params = null; + Map params = new HashMap<>(); if (randomBoolean()) { - params = new HashMap(); params.put("foo", "bar"); } - script = new Script("script", randomFrom(ScriptType.values()), randomFrom("my_lang", null), params); + script = new Script(randomFrom(ScriptType.values()), randomFrom("my_lang", Script.DEFAULT_SCRIPT_LANG), "script", params); } BucketSelectorPipelineAggregationBuilder factory = new BucketSelectorPipelineAggregationBuilder(name, bucketsPaths, script); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 3ad2367c5c5..c582c76bd8c 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Iterator; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -51,7 +52,7 @@ import static org.hamcrest.core.IsNull.notNullValue; public class PercentilesBucketIT extends ESIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - private static final double[] PERCENTS = {1.0, 25.0, 50.0, 75.0, 99.0}; + private static final double[] PERCENTS = {0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0}; static int numDocs; static int interval; static int minRandomValue; @@ -123,11 +124,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Double p : PERCENTS) { - double expected = values[(int)((p / 100) * values.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } - + assertPercentileBucket(PERCENTS, values, percentilesBucketValue); } public void testDocCountAsSubAgg() throws Exception { @@ -174,10 +171,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = 
termsBucket.getAggregations().get("percentiles_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Double p : PERCENTS) { - double expected = values[(int)((p / 100) * values.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(PERCENTS, values, percentilesBucketValue); } } @@ -212,10 +206,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Double p : PERCENTS) { - double expected = values[(int)((p / 100) * values.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(PERCENTS, values, percentilesBucketValue); } public void testMetricTopLevelDefaultPercents() throws Exception { @@ -248,11 +239,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Percentile p : percentilesBucketValue) { - double expected = values[(int)((p.getPercent() / 100) * values.length)]; - assertThat(percentilesBucketValue.percentile(p.getPercent()), equalTo(expected)); - assertThat(p.getValue(), equalTo(expected)); - } + assertPercentileBucket(values, percentilesBucketValue); } public void testMetricAsSubAgg() throws Exception { @@ -304,10 +291,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Double p : PERCENTS) { - double expected = values.get((int) ((p / 100) * values.size())); - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(PERCENTS, values.stream().mapToDouble(Double::doubleValue).toArray(), percentilesBucketValue); } } @@ -361,10 +345,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Double p : PERCENTS) { - double expected = values[(int)((p / 100) * values.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(PERCENTS, values, percentilesBucketValue); } } @@ -489,7 +470,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count"))) + .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").percents(PERCENTS))) .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket.50") .percents(PERCENTS)).execute().actionGet(); @@ -525,10 +506,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = 
termsBucket.getAggregations().get("percentile_histo_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentile_histo_bucket")); - for (Double p : PERCENTS) { - double expected = innerValues[(int)((p / 100) * innerValues.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(PERCENTS, innerValues, percentilesBucketValue); values[i] = percentilesBucketValue.percentile(50.0); } @@ -537,10 +515,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentile_terms_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentile_terms_bucket")); - for (Double p : PERCENTS) { - double expected = values[(int)((p / 100) * values.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(PERCENTS, values, percentilesBucketValue); } public void testNestedWithDecimal() throws Exception { @@ -591,10 +566,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentile_histo_bucket"); assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentile_histo_bucket")); - for (Double p : percent) { - double expected = innerValues[(int)((p / 100) * innerValues.length)]; - assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); - } + assertPercentileBucket(innerValues, percentilesBucketValue); values[i] = percentilesBucketValue.percentile(99.9); } @@ -608,4 +580,22 @@ public class PercentilesBucketIT extends ESIntegTestCase { assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); } } + + private void assertPercentileBucket(double[] values, PercentilesBucket percentiles) { + for (Percentile percentile : percentiles) { + assertEquals(percentiles.percentile(percentile.getPercent()), percentile.getValue(), 0d); + int index = (int) Math.round((percentile.getPercent() / 100.0) * (values.length - 1)); + assertThat(percentile.getValue(), equalTo(values[index])); + } + } + + private void assertPercentileBucket(double[] percents, double[] values, PercentilesBucket percentiles) { + Iterator it = percentiles.iterator(); + for (int i = 0; i < percents.length; ++i) { + assertTrue(it.hasNext()); + assertEquals(percents[i], it.next().getPercent(), 0d); + } + assertFalse(it.hasNext()); + assertPercentileBucket(values, percentiles); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java index 4602035a40b..c5c681c117f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java @@ -27,9 +27,6 @@ import java.util.List; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class PathTests extends ESTestCase { public void testInvalidPaths() throws Exception { assertInvalidPath("[foo]", "brackets at the beginning of the token expression"); diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index 49c24ba9952..9f03724d3c6 100644 --- 
a/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.query.GeohashCellQuery; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.ScriptScoreFunctionBuilder; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -43,6 +43,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; +import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.TreeSet; @@ -450,7 +451,7 @@ public class TransportTwoNodesSearchIT extends ESIntegTestCase { MultiSearchResponse response = client().prepareMultiSearch() // Add custom score query with bogus script - .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1), new ScriptScoreFunctionBuilder(new Script("foo", ScriptService.ScriptType.INLINE, "bar", null))))) + .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1), new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, "bar", "foo", Collections.emptyMap()))))) .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 967af3d3afc..c494e7c14eb 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -40,6 +40,8 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; @@ -96,40 +98,13 @@ public class SearchSourceBuilderTests extends AbstractSearchTestCase { } public void testEqualsAndHashcode() throws IOException { - SearchSourceBuilder firstBuilder = createSearchSourceBuilder(); - assertNotNull("source builder is equal to null", firstBuilder); - assertFalse("source builder is equal to incompatible type", firstBuilder.equals("")); - assertTrue("source builder is not equal to self", firstBuilder.equals(firstBuilder)); - assertThat("same source builder's hashcode returns different values if called multiple times", firstBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - - SearchSourceBuilder secondBuilder = copyBuilder(firstBuilder); - assertTrue("source builder is not equal to self", secondBuilder.equals(secondBuilder)); - assertTrue("source builder is not equal to its copy", firstBuilder.equals(secondBuilder)); - assertTrue("source builder is not symmetric", secondBuilder.equals(firstBuilder)); - assertThat("source builder copy's hashcode is different from original hashcode", - secondBuilder.hashCode(), 
equalTo(firstBuilder.hashCode())); - - SearchSourceBuilder thirdBuilder = copyBuilder(secondBuilder); - assertTrue("source builder is not equal to self", thirdBuilder.equals(thirdBuilder)); - assertTrue("source builder is not equal to its copy", secondBuilder.equals(thirdBuilder)); - assertThat("source builder copy's hashcode is different from original hashcode", - secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("source builder copy's hashcode is different from original hashcode", - firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder)); + // TODO add test checking that changing any member of this class produces an object that is not equal to the original + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createSearchSourceBuilder(), this::copyBuilder); } //we use the streaming infra to create a copy of the builder provided as argument - private SearchSourceBuilder copyBuilder(SearchSourceBuilder builder) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - builder.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return new SearchSourceBuilder(in); - } - } + private SearchSourceBuilder copyBuilder(SearchSourceBuilder original) throws IOException { + return ESTestCase.copyWriteable(original, namedWriteableRegistry, SearchSourceBuilder::new); } public void testParseIncludeExclude() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 3aa98942833..a3ecc66c030 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -36,18 +36,21 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.HasChildQueryBuilder; import org.elasticsearch.index.query.HasParentQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.Field; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -68,8 +71,8 @@ import java.util.Set; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static 
org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; -import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery; import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery; +import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery; import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; @@ -811,6 +814,30 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2")); } + public void testHasChildInnerHitsHighlighting() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("parent") + .addMapping("child", "_parent", "type=parent")); + ensureGreen(); + + client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get(); + client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", "foo bar").get(); + client().admin().indices().prepareFlush("test").get(); + + SearchResponse searchResponse = client().prepareSearch("test").setQuery( + hasChildQuery("child", matchQuery("c_field", "foo"), ScoreMode.None) + .innerHit(new InnerHitBuilder().setHighlightBuilder( + new HighlightBuilder().field(new Field("c_field").highlightQuery(QueryBuilders.matchQuery("c_field", "bar")))))) + .get(); + assertNoFailures(searchResponse); + assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1")); + SearchHit[] searchHits = searchResponse.getHits().hits()[0].getInnerHits().get("child").hits(); + assertThat(searchHits.length, equalTo(1)); + assertThat(searchHits[0].getHighlightFields().get("c_field").getFragments().length, equalTo(1)); + assertThat(searchHits[0].getHighlightFields().get("c_field").getFragments()[0].string(), equalTo("foo bar")); + } + public void testHasChildAndHasParentWrappedInAQueryFilter() throws Exception { assertAcked(prepareCreate("test") .addMapping("parent") @@ -1121,7 +1148,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { hasChildQuery( "child_type_one", boolQuery().must( - queryStringQuery("name:William*").analyzeWildcard(true) + queryStringQuery("name:William*") ), ScoreMode.None) ), @@ -1138,7 +1165,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { hasChildQuery( "child_type_two", boolQuery().must( - queryStringQuery("name:William*").analyzeWildcard(true) + queryStringQuery("name:William*") ), ScoreMode.None) ), diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index d20fb4e0c06..814f514c96a 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -136,7 +136,7 @@ public class FetchSourceSubPhaseTests extends ESTestCase { @Override public SearchLookup lookup() { - SearchLookup lookup = super.lookup(); + SearchLookup lookup = new SearchLookup(this.mapperService(), this.fieldData(), null); lookup.source().setSource(source); return lookup; } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 1e43ffe532e..86ca66eb87c 
100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -167,7 +167,7 @@ public class InnerHitsIT extends ESIntegTestCase { .setExplain(true) .addDocValueField("comments.message") .addScriptField("script", - new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap())) + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) .setSize(1) )).get(); assertNoFailures(response); @@ -301,8 +301,8 @@ public class InnerHitsIT extends ESIntegTestCase { .addDocValueField("message") .setHighlightBuilder(new HighlightBuilder().field("message")) .setExplain(true).setSize(1) - .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, - MockScriptEngine.NAME, Collections.emptyMap())) + .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", + Collections.emptyMap())) ) ).get(); assertNoFailures(response); @@ -666,7 +666,7 @@ public class InnerHitsIT extends ESIntegTestCase { .innerHit(new InnerHitBuilder())).get(); assertNoFailures(response); assertHitCount(response, 1); - SearchHit hit = response.getHits().getAt(0); + SearchHit hit = response.getHits().getAt(0); assertThat(hit.id(), equalTo("1")); SearchHits messages = hit.getInnerHits().get("comments.messages"); assertThat(messages.getTotalHits(), equalTo(1L)); @@ -982,7 +982,8 @@ public class InnerHitsIT extends ESIntegTestCase { // other features (like in the query dsl or aggs) in order for consistency: SearchResponse response = client().prepareSearch() .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None) - .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext("comments.message")))) + .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(true, + new String[]{"comments.message"}, null)))) .get(); assertNoFailures(response); assertHitCount(response, 1); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java index 170638b295f..ebf3b6e1a50 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java @@ -39,9 +39,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItemInArray; -/** - * - */ public class MatchedQueriesIT extends ESIntegTestCase { public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java index 440d90bdba4..c4c180ab858 100644 --- 
a/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java @@ -46,8 +46,6 @@ import java.util.List; import static org.hamcrest.Matchers.equalTo; -/** - */ public class NestedChildrenFilterTests extends ESTestCase { public void testNestedChildrenFilter() throws Exception { int numParentDocs = scaledRandomIntBetween(0, 32); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index ed272040f51..6fe98a2a3ef 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -49,9 +48,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.fetch.subphase.highlight.AbstractHighlighterBuilder; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.Field; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.Order; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.FieldOptions; @@ -72,8 +68,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import static java.util.Collections.emptyList; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; public class HighlightBuilderTests extends ESTestCase { @@ -115,31 +111,7 @@ public class HighlightBuilderTests extends ESTestCase { */ public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { - HighlightBuilder firstBuilder = randomHighlighterBuilder(); - assertFalse("highlighter is equal to null", firstBuilder.equals(null)); - assertFalse("highlighter is equal to incompatible type", firstBuilder.equals("")); - assertTrue("highlighter is not equal to self", firstBuilder.equals(firstBuilder)); - assertThat("same highlighter's hashcode returns different values if called multiple times", firstBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - assertThat("different highlighters should not be equal", mutate(firstBuilder), not(equalTo(firstBuilder))); - - HighlightBuilder secondBuilder = serializedCopy(firstBuilder); - assertTrue("highlighter is not equal to self", secondBuilder.equals(secondBuilder)); - assertTrue("highlighter is not equal to its copy", firstBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", 
secondBuilder.equals(firstBuilder)); - assertThat("highlighter copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - - HighlightBuilder thirdBuilder = serializedCopy(secondBuilder); - assertTrue("highlighter is not equal to self", thirdBuilder.equals(thirdBuilder)); - assertTrue("highlighter is not equal to its copy", secondBuilder.equals(thirdBuilder)); - assertThat("highlighter copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("highlighter copy's hashcode is different from original hashcode", firstBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder)); + checkEqualsAndHashCode(randomHighlighterBuilder(), HighlightBuilderTests::serializedCopy, HighlightBuilderTests::mutate); } } @@ -293,8 +265,8 @@ public class HighlightBuilderTests extends ESTestCase { Index index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter - QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry, - null, null, null) { + QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, indicesQueriesRegistry, + null, null, null, System::currentTimeMillis) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); @@ -335,7 +307,7 @@ public class HighlightBuilderTests extends ESTestCase { String[] copy = Arrays.copyOf(fieldBuilder.matchedFields, fieldBuilder.matchedFields.length); Arrays.sort(copy); assertArrayEquals(copy, - new TreeSet(fieldOptions.matchedFields()).toArray(new String[fieldOptions.matchedFields().size()])); + new TreeSet<>(fieldOptions.matchedFields()).toArray(new String[fieldOptions.matchedFields().size()])); } else { assertNull(fieldOptions.matchedFields()); } @@ -536,7 +508,7 @@ public class HighlightBuilderTests extends ESTestCase { return testHighlighter; } - @SuppressWarnings({ "rawtypes", "unchecked" }) + @SuppressWarnings({ "rawtypes"}) private static void setRandomCommonOptions(AbstractHighlighterBuilder highlightBuilder) { if (randomBoolean()) { // need to set this together, otherwise parsing will complain @@ -600,7 +572,7 @@ public class HighlightBuilderTests extends ESTestCase { } if (randomBoolean()) { int items = randomIntBetween(0, 5); - Map options = new HashMap(items); + Map options = new HashMap<>(items); for (int i = 0; i < items; i++) { Object value = null; switch (randomInt(2)) { @@ -673,7 +645,7 @@ public class HighlightBuilderTests extends ESTestCase { break; case 15: int items = 6; - Map options = new HashMap(items); + Map options = new HashMap<>(items); for (int i = 0; i < items; i++) { options.put(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); } @@ -699,7 +671,7 @@ public class HighlightBuilderTests extends ESTestCase { */ private static String[] randomStringArray(int minSize, int maxSize) { int size = randomIntBetween(minSize, maxSize); - Set randomStrings = new HashSet(size); + Set randomStrings = new 
HashSet<>(size); for (int f = 0; f < size; f++) { randomStrings.add(randomAsciiOfLengthBetween(3, 10)); } @@ -741,11 +713,6 @@ public class HighlightBuilderTests extends ESTestCase { } private static HighlightBuilder serializedCopy(HighlightBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return new HighlightBuilder(in); - } - } + return ESTestCase.copyWriteable(original, namedWriteableRegistry, HighlightBuilder::new); } } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 19904be38b3..ed63ea1ea1c 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoPoint; @@ -40,15 +41,19 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.Field; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matcher; import org.hamcrest.Matchers; +import org.joda.time.DateTime; +import org.joda.time.chrono.ISOChronology; import java.io.IOException; import java.util.Collection; @@ -84,6 +89,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHigh import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -2907,4 +2913,75 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertThat(field.getFragments().length, equalTo(1)); assertThat(field.getFragments()[0].string(), equalTo("brown")); } + + public void testSynonyms() throws IOException { + Builder builder = Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.synonym.tokenizer", "whitespace") + .putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase") + .put("index.analysis.filter.synonym.type", "synonym") 
+ .putArray("index.analysis.filter.synonym.synonyms", "fast,quick"); + + assertAcked(prepareCreate("test").setSettings(builder.build()) + .addMapping("type1", "field1", + "type=text,term_vector=with_positions_offsets,search_analyzer=synonym," + + "analyzer=english,index_options=offsets")); + ensureGreen(); + + client().prepareIndex("test", "type1", "0").setSource( + "field1", "The quick brown fox jumps over the lazy dog").get(); + refresh(); + for (String highlighterType : new String[] {"plain", "postings", "fvh"}) { + logger.info("--> highlighting (type=" + highlighterType + ") and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(matchQuery("field1", "quick brown fox").operator(Operator.AND)) + .highlighter( + highlight() + .field("field1") + .order("score") + .preTags("") + .postTags("") + .highlighterType(highlighterType)); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertHighlight(searchResponse, 0, "field1", 0, 1, + equalTo("The quick brown fox jumps over the lazy dog")); + + source = searchSource() + .query(matchQuery("field1", "fast brown fox").operator(Operator.AND)) + .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertHighlight(searchResponse, 0, "field1", 0, 1, + equalTo("The quick brown fox jumps over the lazy dog")); + } + } + + public void testHighlightQueryRewriteDatesWithNow() throws Exception { + assertAcked(client().admin().indices().prepareCreate("index-1").addMapping("type", "d", "type=date", + "field", "type=text,store=true,term_vector=with_positions_offsets") + .setSettings("index.number_of_replicas", 0, "index.number_of_shards", 2) + .get()); + DateTime now = new DateTime(ISOChronology.getInstanceUTC()); + indexRandom(true, client().prepareIndex("index-1", "type", "1").setSource("d", now, "field", "hello world"), + client().prepareIndex("index-1", "type", "2").setSource("d", now.minusDays(1), "field", "hello"), + client().prepareIndex("index-1", "type", "3").setSource("d", now.minusDays(2), "field", "world")); + ensureSearchable("index-1"); + for (int i = 0; i < 5; i++) { + final SearchResponse r1 = client().prepareSearch("index-1") + .addSort("d", SortOrder.DESC) + .setTrackScores(true) + .highlighter(highlight() + .field("field") + .preTags("") + .postTags("") + ).setQuery(QueryBuilders.boolQuery().must( + QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) + .should(QueryBuilders.termQuery("field", "hello"))) + .get(); + + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(1L)); + assertHighlight(r1, 0, "field", 0, 1, + equalTo("hello world")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index e66eeb48766..da844b1969e 100644 --- a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -36,7 +36,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; 
import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -285,15 +285,18 @@ public class SearchFieldsIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script("doc['num1'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("sNum1_field", new Script("_fields['num1'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("date1", new Script("doc['date'].date.millis", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) + .addScriptField("sNum1_field", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap())) + .addScriptField("date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap())) .execute().actionGet(); assertNoFailures(response); assertThat(response.getHits().totalHits(), equalTo(3L)); - assertThat(response.getHits().getAt(0).hasSource(), equalTo(true)); + assertFalse(response.getHits().getAt(0).hasSource()); assertThat(response.getHits().getAt(0).id(), equalTo("1")); Set fields = new HashSet<>(response.getHits().getAt(0).fields().keySet()); fields.remove(TimestampFieldMapper.NAME); // randomly enabled via templates @@ -321,7 +324,7 @@ public class SearchFieldsIT extends ESIntegTestCase { response = client().prepareSearch() .setQuery(matchAllQuery()) .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script("doc['num1'].value * factor", ScriptType.INLINE, CustomScriptPlugin.NAME, params)) + .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", params)) .get(); assertThat(response.getHits().totalHits(), equalTo(3L)); @@ -357,7 +360,7 @@ public class SearchFieldsIT extends ESIntegTestCase { .setQuery(matchAllQuery()) .addSort("num1", SortOrder.ASC) .setSize(numDocs) - .addScriptField("uid", new Script("_fields._uid.value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("uid", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._uid.value", Collections.emptyMap())) .get(); assertNoFailures(response); @@ -375,7 +378,7 @@ public class SearchFieldsIT extends ESIntegTestCase { .setQuery(matchAllQuery()) .addSort("num1", SortOrder.ASC) .setSize(numDocs) - .addScriptField("id", new Script("_fields._id.value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())) .get(); assertNoFailures(response); @@ -393,7 +396,8 @@ public class SearchFieldsIT extends ESIntegTestCase { .setQuery(matchAllQuery()) .addSort("num1", SortOrder.ASC) .setSize(numDocs) - .addScriptField("type", new Script("_fields._type.value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("type", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._type.value", Collections.emptyMap())) .get(); assertNoFailures(response); @@ -411,9 +415,10 @@ public class SearchFieldsIT extends ESIntegTestCase { .setQuery(matchAllQuery()) .addSort("num1", SortOrder.ASC) .setSize(numDocs) - .addScriptField("id", new Script("_fields._id.value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("uid", new Script("_fields._uid.value", ScriptType.INLINE, CustomScriptPlugin.NAME, 
null)) - .addScriptField("type", new Script("_fields._type.value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())) + .addScriptField("uid", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._uid.value", Collections.emptyMap())) + .addScriptField("type", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._type.value", Collections.emptyMap())) .get(); assertNoFailures(response); @@ -444,11 +449,13 @@ public class SearchFieldsIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("s_obj1", new Script("_source.obj1", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("s_obj1_test", new Script("_source.obj1.test", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("s_obj2", new Script("_source.obj2", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("s_obj2_arr2", new Script("_source.obj2.arr2", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) - .addScriptField("s_arr3", new Script("_source.arr3", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) + .addScriptField("s_obj1_test", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap())) + .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) + .addScriptField("s_obj2_arr2", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap())) + .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())) .get(); assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); @@ -481,7 +488,8 @@ public class SearchFieldsIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("test_script_1", new Script("return null", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("test_script_1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) .get(); assertNoFailures(response); @@ -819,7 +827,7 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d)); assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) 1332374400000L)); - assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) 1L)); + assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).fields().get("text_field").value(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).fields().get("keyword_field").value(), equalTo("foo")); } @@ -847,7 +855,8 @@ public class SearchFieldsIT extends ESIntegTestCase { ensureSearchable(); SearchRequestBuilder req = client().prepareSearch("index"); for (String field : Arrays.asList("s", "ms", "l", "ml", "d", "md")) { - req.addScriptField(field, new Script("doc['" + field + 
"'].values", ScriptType.INLINE, CustomScriptPlugin.NAME, null)); + req.addScriptField(field, + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + field + "'].values", Collections.emptyMap())); } SearchResponse resp = req.get(); assertSearchResponse(resp); diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 1c87ccfe4cb..9b732bdc00d 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ExplainableSearchScript; import org.elasticsearch.script.NativeScriptFactory; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESIntegTestCase; @@ -44,6 +44,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -76,7 +77,8 @@ public class ExplainableScriptIT extends ESIntegTestCase { SearchResponse response = client().search(searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().explain(true).query( functionScoreQuery(termQuery("text", "text"), - scriptFunction(new Script("native_explainable_script", ScriptType.INLINE, "native", null))) + scriptFunction( + new Script(ScriptType.INLINE, "native", "native_explainable_script", Collections.emptyMap()))) .boostMode(CombineFunction.REPLACE)))).actionGet(); ElasticsearchAssertions.assertNoFailures(response); diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityIT.java index 655aecd8fb5..7ef8cfe78d1 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityIT.java @@ -43,8 +43,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -/** - */ public class FunctionScoreBackwardCompatibilityIT extends ESBackcompatTestCase { /** * Simple upgrade test for function score. 
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index 3854d200116..e9cb9d72a8b 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -48,7 +48,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; -import static org.elasticsearch.script.ScriptService.ScriptType; + +import org.elasticsearch.script.ScriptType; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -93,8 +94,8 @@ public class FunctionScoreIT extends ESIntegTestCase { index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); - Script scriptOne = new Script("1", ScriptType.INLINE, CustomScriptPlugin.NAME, null); - Script scriptTwo = new Script("get score value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); + Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); SearchResponse response = client().search( searchRequest().source( @@ -117,7 +118,7 @@ public class FunctionScoreIT extends ESIntegTestCase { index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); - Script script = new Script("get score value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); SearchResponse response = client().search( searchRequest().source( @@ -145,7 +146,7 @@ public class FunctionScoreIT extends ESIntegTestCase { refresh(); ensureYellow(); - Script script = new Script("doc['random_score']", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); SearchResponse searchResponse = client().search( searchRequest().source(searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore))) ).actionGet(); @@ -177,7 +178,7 @@ public class FunctionScoreIT extends ESIntegTestCase { docs.add(client().prepareIndex(INDEX, TYPE, Integer.toString(i)).setSource("num", i + scoreOffset)); } indexRandom(true, docs); - Script script = new Script("return (doc['num'].value)", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return (doc['num'].value)", Collections.emptyMap()); int numMatchingDocs = numDocs + scoreOffset - minScore; if (numMatchingDocs < 0) { numMatchingDocs = 0; diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 5fbfbba46db..21622d0e4ae 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -52,9 +52,6 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.hamcrest.Matchers.equalTo; -/** - * - */ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) public class FunctionScorePluginIT extends ESIntegTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 364186572d9..242847587f6 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -66,12 +66,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThir import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public class QueryRescorerIT extends ESIntegTestCase { public void testEnforceWindowSize() { createIndex("test"); @@ -100,6 +98,7 @@ public class QueryRescorerIT extends ESIntegTestCase { numDocsWith100AsAScore += 1; } } + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); // we cannot assert that they are equal since some shards might not have docs at all assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); } @@ -125,6 +124,7 @@ public class QueryRescorerIT extends ESIntegTestCase { .setRescoreQueryWeight(2), 5).execute().actionGet(); assertThat(searchResponse.getHits().totalHits(), equalTo(3L)); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); @@ -145,6 +145,7 @@ public class QueryRescorerIT extends ESIntegTestCase { .actionGet(); assertHitCount(searchResponse, 3); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("1")); assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); @@ -206,6 +207,7 @@ public class QueryRescorerIT extends ESIntegTestCase { assertThat(searchResponse.getHits().hits().length, equalTo(5)); assertHitCount(searchResponse, 9); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("2")); assertSecondHit(searchResponse, hasId("6")); assertThirdHit(searchResponse, hasId("3")); @@ -222,6 +224,7 @@ public class QueryRescorerIT extends ESIntegTestCase { assertThat(searchResponse.getHits().hits().length, equalTo(5)); assertHitCount(searchResponse, 9); + assertThat(searchResponse.getHits().maxScore(), greaterThan(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("3")); } @@ -255,6 +258,7 @@ public class QueryRescorerIT extends ESIntegTestCase { .setSize(5).execute().actionGet(); 
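The assertions added across QueryRescorerIT pin down the contract between SearchHits.maxScore() and the first hit after rescoring: in every ordinary case the two must agree, and the single greaterThan case covers a rescorer with a negative rescore-query weight, where the demoted top hit can legitimately score below the maximum observed in the query phase. A sketch of the shared assertion (the helper name is mine, not from the diff):

```java
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.SearchHits;

public class RescoreMaxScoreAssertionSketch {
    // After a rescore that does not demote the leader, the reported max score
    // should be exactly the score of the first (best) hit; a negative rescore
    // weight is the one documented exception in these tests.
    public static void assertMaxScoreIsTopHitScore(SearchResponse response) {
        SearchHits hits = response.getHits();
        assertThat(hits.maxScore(), equalTo(hits.getHits()[0].score()));
    }
}
```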
assertThat(searchResponse.getHits().hits().length, equalTo(4)); assertHitCount(searchResponse, 4); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("3")); assertSecondHit(searchResponse, hasId("6")); assertThirdHit(searchResponse, hasId("1")); @@ -271,6 +275,7 @@ public class QueryRescorerIT extends ESIntegTestCase { // Only top 2 hits were re-ordered: assertThat(searchResponse.getHits().hits().length, equalTo(4)); assertHitCount(searchResponse, 4); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("6")); assertSecondHit(searchResponse, hasId("3")); assertThirdHit(searchResponse, hasId("1")); @@ -288,6 +293,7 @@ public class QueryRescorerIT extends ESIntegTestCase { // Only top 3 hits were re-ordered: assertThat(searchResponse.getHits().hits().length, equalTo(4)); assertHitCount(searchResponse, 4); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("6")); assertSecondHit(searchResponse, hasId("1")); assertThirdHit(searchResponse, hasId("3")); @@ -324,6 +330,7 @@ public class QueryRescorerIT extends ESIntegTestCase { .setSize(5).execute().actionGet(); assertThat(searchResponse.getHits().hits().length, equalTo(4)); assertHitCount(searchResponse, 4); + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("3")); assertSecondHit(searchResponse, hasId("6")); assertThirdHit(searchResponse, hasId("1")); @@ -339,6 +346,7 @@ public class QueryRescorerIT extends ESIntegTestCase { .setQueryWeight(1.0f).setRescoreQueryWeight(-1f), 3).execute().actionGet(); // 6 and 1 got worse, and then the hit (2) outside the rescore window were sorted ahead: + assertThat(searchResponse.getHits().maxScore(), equalTo(searchResponse.getHits().getHits()[0].score())); assertFirstHit(searchResponse, hasId("3")); assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("6")); @@ -598,6 +606,7 @@ public class QueryRescorerIT extends ESIntegTestCase { assertHitCount(rescored, 4); + assertThat(rescored.getHits().maxScore(), equalTo(rescored.getHits().getHits()[0].score())); if ("total".equals(scoreMode) || "".equals(scoreMode)) { assertFirstHit(rescored, hasId(String.valueOf(i + 1))); assertSecondHit(rescored, hasId(String.valueOf(i))); @@ -675,6 +684,7 @@ public class QueryRescorerIT extends ESIntegTestCase { .boostMode(CombineFunction.REPLACE)).setScoreMode(QueryRescoreMode.Total); request.clearRescorers().addRescorer(ninetyIsGood, numDocs).addRescorer(oneToo, 10); response = request.setSize(2).get(); + assertThat(response.getHits().maxScore(), equalTo(response.getHits().getHits()[0].score())); assertFirstHit(response, hasId("91")); assertFirstHit(response, hasScore(2001.0f)); assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java index 9c2b41eabe6..a6f7eb760d2 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.Plugin; 
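The RandomScoreFunctionIT hunks that follow migrate mock scripts that read _score through its numeric accessors (intValue(), longValue(), floatValue(), doubleValue()); all the variants compute the same expression. A plain-Java sketch of that expression, with the params lookup mirroring the test's "factor" entry (the class and method names are mine):

```java
import java.util.Map;

public class ScoreExpressionSketch {
    // Mirrors the mock script "log(doc['index'].value + (factor * _score))":
    // combine a per-document value with the query score, scaled by a parameter
    // passed in through the script params map.
    public static double score(double docIndexValue, double queryScore, Map<String, Object> params) {
        int factor = (int) params.get("factor");
        return Math.log(docIndexValue + factor * queryScore);
    }
}
```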
import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.ScoreAccessor; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.CoreMatchers; @@ -169,7 +169,7 @@ public class RandomScoreFunctionIT extends ESIntegTestCase { params.put("factor", randomIntBetween(2, 4)); // Test for accessing _score - Script script = new Script("log(doc['index'].value + (factor * _score))", ScriptType.INLINE, NAME, params); + Script script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score))", params); SearchResponse resp = client() .prepareSearch("test") .setQuery( @@ -185,7 +185,7 @@ public class RandomScoreFunctionIT extends ESIntegTestCase { assertThat(firstHit.getScore(), greaterThan(1f)); // Test for accessing _score.intValue() - script = new Script("log(doc['index'].value + (factor * _score.intValue()))", ScriptType.INLINE, NAME, params); + script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.intValue()))", params); resp = client() .prepareSearch("test") .setQuery( @@ -201,7 +201,7 @@ public class RandomScoreFunctionIT extends ESIntegTestCase { assertThat(firstHit.getScore(), greaterThan(1f)); // Test for accessing _score.longValue() - script = new Script("log(doc['index'].value + (factor * _score.longValue()))", ScriptType.INLINE, NAME, params); + script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.longValue()))", params); resp = client() .prepareSearch("test") .setQuery( @@ -217,7 +217,7 @@ public class RandomScoreFunctionIT extends ESIntegTestCase { assertThat(firstHit.getScore(), greaterThan(1f)); // Test for accessing _score.floatValue() - script = new Script("log(doc['index'].value + (factor * _score.floatValue()))", ScriptType.INLINE, NAME, params); + script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.floatValue()))", params); resp = client() .prepareSearch("test") .setQuery( @@ -233,7 +233,7 @@ public class RandomScoreFunctionIT extends ESIntegTestCase { assertThat(firstHit.getScore(), greaterThan(1f)); // Test for accessing _score.doubleValue() - script = new Script("log(doc['index'].value + (factor * _score.doubleValue()))", ScriptType.INLINE, NAME, params); + script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.doubleValue()))", params); resp = client() .prepareSearch("test") .setQuery( diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 1ac9147e53a..97615f63c9d 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -44,9 +44,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class GeoBoundingBoxIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 4ea143ed7f9..6c9acd7e8a7 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -20,30 +20,41 @@ package org.elasticsearch.search.geo; import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoHashUtils; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.range.Range; +import org.elasticsearch.search.aggregations.bucket.range.geodistance.InternalGeoDistance; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; +import org.junit.Before; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.script.ScriptService.ScriptType; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.closeTo; @@ -63,7 +74,6 @@ public class GeoDistanceIT extends ESIntegTestCase { public static class CustomScriptPlugin extends MockScriptPlugin { @Override - @SuppressWarnings("unchecked") protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); @@ -83,15 +93,14 @@ public class GeoDistanceIT extends ESIntegTestCase { return scripts; } - @SuppressWarnings("unchecked") static Double distanceScript(Map<String, Object> vars, Function<ScriptDocValues.GeoPoints, Double> distance) { Map doc = (Map) vars.get("doc"); return distance.apply((ScriptDocValues.GeoPoints) doc.get("location")); } } - public void testDistanceScript() throws Exception { - + @Before + public void setupTestIndex() throws IOException { Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -102,7 +111,9 @@ public class GeoDistanceIT extends ESIntegTestCase { xContentBuilder.endObject().endObject().endObject().endObject(); assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder)); ensureGreen(); + } + public void testDistanceScript() throws Exception { client().prepareIndex("test", "type1", "1") .setSource(jsonBuilder().startObject() .field("name", "TestPosition") @@ -117,7 +128,7 @@ public class GeoDistanceIT extends ESIntegTestCase { // Test doc['location'].arcDistance(lat, lon) SearchResponse searchResponse1 = client().prepareSearch().addStoredField("_source") - .addScriptField("distance",
new Script("arcDistance", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("distance", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "arcDistance", Collections.emptyMap())) .get(); Double resultDistance1 = searchResponse1.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultDistance1, @@ -125,16 +136,16 @@ public class GeoDistanceIT extends ESIntegTestCase { // Test doc['location'].planeDistance(lat, lon) SearchResponse searchResponse2 = client().prepareSearch().addStoredField("_source") - .addScriptField("distance", new Script("planeDistance", ScriptType.INLINE, - CustomScriptPlugin.NAME, null)).get(); + .addScriptField("distance", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "planeDistance", + Collections.emptyMap())).get(); Double resultDistance2 = searchResponse2.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultDistance2, closeTo(GeoUtils.planeDistance(src_lat, src_lon, tgt_lat, tgt_lon), 0.01d)); // Test doc['location'].geohashDistance(lat, lon) SearchResponse searchResponse4 = client().prepareSearch().addStoredField("_source") - .addScriptField("distance", new Script("geohashDistance", ScriptType.INLINE, - CustomScriptPlugin.NAME, null)).get(); + .addScriptField("distance", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "geohashDistance", + Collections.emptyMap())).get(); Double resultDistance4 = searchResponse4.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultDistance4, closeTo(GeoUtils.arcDistance(src_lat, src_lon, GeoHashUtils.decodeLatitude(tgt_geohash), @@ -142,18 +153,55 @@ public class GeoDistanceIT extends ESIntegTestCase { // Test doc['location'].arcDistance(lat, lon + 360)/1000d SearchResponse searchResponse5 = client().prepareSearch().addStoredField("_source") - .addScriptField("distance", new Script("arcDistance(lat, lon + 360)/1000d", ScriptType.INLINE, - CustomScriptPlugin.NAME, null)).get(); + .addScriptField("distance", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "arcDistance(lat, lon + 360)/1000d", + Collections.emptyMap())).get(); Double resultArcDistance5 = searchResponse5.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultArcDistance5, closeTo(GeoUtils.arcDistance(src_lat, src_lon, tgt_lat, tgt_lon)/1000d, 0.01d)); // Test doc['location'].arcDistance(lat + 360, lon)/1000d SearchResponse searchResponse6 = client().prepareSearch().addStoredField("_source") - .addScriptField("distance", new Script("arcDistance(lat + 360, lon)/1000d", ScriptType.INLINE, - CustomScriptPlugin.NAME, null)).get(); + .addScriptField("distance", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "arcDistance(lat + 360, lon)/1000d", + Collections.emptyMap())).get(); Double resultArcDistance6 = searchResponse6.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultArcDistance6, closeTo(GeoUtils.arcDistance(src_lat, src_lon, tgt_lat, tgt_lon)/1000d, 0.01d)); } + + public void testGeoDistanceAggregation() throws IOException { + client().prepareIndex("test", "type1", "1") + .setSource(jsonBuilder().startObject() + .field("name", "TestPosition") + .startObject("location") + .field("lat", src_lat) + .field("lon", src_lon) + .endObject() + .endObject()) + .get(); + + refresh(); + + SearchRequestBuilder search = client().prepareSearch("test"); + String name = "TestPosition"; + + search.setQuery(QueryBuilders.matchAllQuery()) + .setTypes("type1") + 
.addAggregation(AggregationBuilders.geoDistance(name, new GeoPoint(tgt_lat, tgt_lon)) + .field("location") + .unit(DistanceUnit.MILES) + .addRange(0, 25000)); + + search.setSize(0); // no hits please + + SearchResponse response = search.get(); + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + InternalGeoDistance geoDistance = aggregations.get(name); + assertNotNull(geoDistance); + + List buckets = ((Range) geoDistance).getBuckets(); + assertNotNull("Buckets should not be null", buckets); + assertEquals("Unexpected number of buckets", 1, buckets.size()); + assertEquals("Unexpected doc count for geo distance", 1, buckets.get(0).getDocCount()); + } } diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index cc832f8a7d1..7f880211c3b 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -93,9 +93,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -/** - * - */ public class GeoFilterIT extends ESIntegTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java b/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java index 606e9a18f2d..168729d5c0b 100644 --- a/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java @@ -31,9 +31,6 @@ import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class SimpleIndicesBoostSearchIT extends ESIntegTestCase { public void testIndicesBoost() throws Exception { assertHitCount(client().prepareSearch().setQuery(termQuery("test", "value")).get(), 0); diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java index 20216e10593..2cb425d5274 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.text.Text; @@ -26,7 +27,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; -import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.HashMap; import java.util.Map; @@ -76,4 +76,22 @@ public class InternalSearchHitTests extends ESTestCase { assertThat(results.getAt(1).shard(), equalTo(target)); } + public void testNullSource() throws Exception { + InternalSearchHit searchHit = new InternalSearchHit(0, "_id", new Text("_type"), null); + + assertThat(searchHit.source(), nullValue()); + assertThat(searchHit.sourceRef(), nullValue()); + assertThat(searchHit.sourceAsMap(), nullValue()); + assertThat(searchHit.sourceAsString(), nullValue()); + 
assertThat(searchHit.getSource(), nullValue()); + assertThat(searchHit.getSourceRef(), nullValue()); + assertThat(searchHit.getSourceAsString(), nullValue()); + } + + public void testHasSource() { + InternalSearchHit searchHit = new InternalSearchHit(randomInt()); + assertFalse(searchHit.hasSource()); + searchHit.sourceRef(new BytesArray("{}")); + assertTrue(searchHit.hasSource()); + } } diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 452b6b6ba3a..819f93fcc0f 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -19,21 +19,51 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.Version; +import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.RandomQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.AbstractSearchTestCase; import java.io.IOException; +import java.util.Base64; +import java.util.function.Function; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { + private IndexMetaData baseMetaData = IndexMetaData.builder("test").settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(1).build(); public void testSerialization() throws Exception { ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest(); @@ -43,7 +73,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { ShardSearchTransportRequest deserializedRequest = new ShardSearchTransportRequest(); 
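The surrounding testSerialization follows the standard write-then-read round-trip idiom for wire serialization, the same idiom that HighlightBuilderTests above now delegates to ESTestCase.copyWriteable. A generic sketch of the buffer half of that round trip, using only stream classes that appear in these hunks (the wrapper class is mine):

```java
import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

public class WireRoundTripSketch {
    // Serialize a Writeable into an in-memory buffer and hand back a StreamInput
    // positioned at the start of the written bytes, ready for the matching read
    // path (a StreamInput constructor or readFrom, as in the surrounding test).
    public static StreamInput writeThenRead(Writeable original) throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            original.writeTo(output);
            return output.bytes().streamInput();
        }
    }
}
```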
deserializedRequest.readFrom(in); assertEquals(deserializedRequest.scroll(), shardSearchTransportRequest.scroll()); - assertArrayEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); + assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); assertArrayEquals(deserializedRequest.indices(), shardSearchTransportRequest.indices()); assertArrayEquals(deserializedRequest.types(), shardSearchTransportRequest.types()); assertEquals(deserializedRequest.indicesOptions(), shardSearchTransportRequest.indicesOptions()); @@ -55,6 +85,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards()); assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey()); assertNotSame(deserializedRequest, shardSearchTransportRequest); + assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); } } } @@ -64,13 +95,129 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt()); ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason")); - String[] filteringAliases; + final AliasFilter filteringAliases; if (randomBoolean()) { - filteringAliases = generateRandomStringArray(10, 10, false, false); + String[] strings = generateRandomStringArray(10, 10, false, false); + filteringAliases = new AliasFilter(RandomQueryBuilder.createQuery(random()), strings); } else { - filteringAliases = Strings.EMPTY_ARRAY; + filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY); } return new ShardSearchTransportRequest(searchRequest, shardRouting, randomIntBetween(1, 100), filteringAliases, Math.abs(randomLong())); } + + public void testFilteringAliases() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog"))); + indexMetaData = add(indexMetaData, "all", null); + + assertThat(indexMetaData.getAliases().containsKey("cats"), equalTo(true)); + assertThat(indexMetaData.getAliases().containsKey("dogs"), equalTo(true)); + assertThat(indexMetaData.getAliases().containsKey("turtles"), equalTo(false)); + + assertEquals(aliasFilter(indexMetaData, "cats"), QueryBuilders.termQuery("animal", "cat")); + assertEquals(aliasFilter(indexMetaData, "cats", "dogs"), QueryBuilders.boolQuery().should(QueryBuilders.termQuery("animal", "cat")) + .should(QueryBuilders.termQuery("animal", "dog"))); + + // Non-filtering alias should turn off all filters because filters are ORed + assertThat(aliasFilter(indexMetaData,"all"), nullValue()); + assertThat(aliasFilter(indexMetaData, "cats", "all"), nullValue()); + assertThat(aliasFilter(indexMetaData, "all", "cats"), nullValue()); + + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "feline"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "canine"))); + assertEquals(aliasFilter(indexMetaData, "dogs", "cats"),QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("animal", "canine")) + .should(QueryBuilders.termQuery("animal", "feline"))); + } + + public 
void testRemovedAliasFilter() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = remove(indexMetaData, "cats"); + try { + aliasFilter(indexMetaData, "cats"); + fail("Expected InvalidAliasNameException"); + } catch (InvalidAliasNameException e) { + assertThat(e.getMessage(), containsString("Invalid alias name [cats]")); + } + } + + public void testUnknownAliasFilter() throws Exception { + IndexMetaData indexMetaData = baseMetaData; + indexMetaData = add(indexMetaData, "cats", filter(termQuery("animal", "cat"))); + indexMetaData = add(indexMetaData, "dogs", filter(termQuery("animal", "dog"))); + IndexMetaData finalIndexMetadata = indexMetaData; + expectThrows(InvalidAliasNameException.class, () -> aliasFilter(finalIndexMetadata, "unknown")); + } + + public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.close(); + return new CompressedXContent(builder.string()); + } + + private IndexMetaData remove(IndexMetaData indexMetaData, String alias) { + IndexMetaData build = IndexMetaData.builder(indexMetaData).removeAlias(alias).build(); + return build; + } + + private IndexMetaData add(IndexMetaData indexMetaData, String alias, @Nullable CompressedXContent filter) { + return IndexMetaData.builder(indexMetaData).putAlias(AliasMetaData.builder(alias).filter(filter).build()).build(); + } + + public QueryBuilder aliasFilter(IndexMetaData indexMetaData, String... aliasNames) { + Function contextFactory = (p) -> new QueryParseContext(queriesRegistry, + p, new ParseFieldMatcher(Settings.EMPTY)); + return ShardSearchRequest.parseAliasFilter(contextFactory, indexMetaData, aliasNames); + } + + // BWC test for changes from #20916 + public void testSerialize50Request() throws IOException { + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAh4cXptdEhJcgdnT0d1ZldWyfL/sgQBJAHkDAMBAAIBAQ4TWlljWlZ5TkVmRU5xQnFQVHBjVBRZbUpod2pRV2dDSXVxRXpRaEdGVBRFZWFJY0plT2hn" + + "UEpISFhmSXR6Qw5XZ1hQcmFidWhWalFSQghuUWNwZ2JjQxBtZldRREJPaGF3UnlQSE56EVhQSUtRa25Iekh3bU5kbGVECWlFT2NIeEh3RgZIYXpMTWgUeGJq" + + "VU9Tdkdua3RORU5QZkNrb1EOalRyWGh5WXhvZ3plV2UUcWlXZFl2eUFUSXdPVGdMUUtYTHAJU3RKR3JxQkVJEkdEQ01xUHpnWWNaT3N3U3prSRIUeURlVFpM" + + "Q1lBZERZcWpDb3NOVWIST1NyQlZtdUNrd0F1UXRvdVRjEGp6RlVMd1dqc3VtUVNaTk0JT3N2cnpLQ3ZLBmRpS1J6cgdYbmVhZnBxBUlTUU9pEEJMcm1ERXVs" + + "eXhESlBoVkgTaWdUUmtVZGh4d0FFc2ZKRm9ZahNrb01XTnFFd2NWSVVDU3pWS2xBC3JVTWV3V2tUUWJUE3VGQU1Hd21CYUFMTmNQZkxobXUIZ3dxWHBxWXcF" + + "bmNDZUEOTFBSTEpYZVF6Z3d2eE0PV1BucUFacll6WWRxa1hCDGxkbXNMaVRzcUZXbAtSY0NsY3FNdlJQcv8BAP////8PAQAAARQAAQp5THlIcHdQeGtMAAAB" + + "AQAAAAEDbkVLAQMBCgACAAADAQABAAAAAQhIc25wRGxQbwEBQgABAAACAQMAAAEIAAAJMF9OSG9kSmh2HwABAwljRW5MVWxFbVQFemlxWG8KcXZQTkRUUGJk" + + "bgECCkpMbXVMT1dtVnkISEdUUHhsd0cBAAEJAAABA2lkcz+rKsUAAAAAAAAAAAECAQYAAgwxX0ZlRWxSQkhzQ07/////DwABAAEDCnRyYXFHR1hjVHkKTERY" + + "aE1HRWVySghuSWtzbEtXUwABCgEHSlRwQnhwdwAAAQECAgAAAAAAAQcyX3FlYmNDGQEEBklxZU9iUQdTc01Gek5YCWlMd2xuamNRQwNiVncAAUHt61kAAQR0" + + "ZXJtP4AAAAANbUtDSnpHU3lidm5KUBUMaVpqeG9vcm5QSFlvAAEBLGdtcWxuRWpWTXdvTlhMSHh0RWlFdHBnbEF1cUNmVmhoUVlwRFZxVllnWWV1A2ZvbwEA" + + "AQhwYWlubGVzc/8AALk4AAAAAAABAAAAAAAAAwpKU09PU0ZmWnhFClVqTGxMa2p3V2gKdUJwZ3R3dXFER5Hg97uT7MOmPgEADw")); + try (StreamInput in = new 
NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(Version.V_5_0_0); + ShardSearchTransportRequest readRequest = new ShardSearchTransportRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> readRequest.filteringAliases()); + assertEquals("alias filter for aliases: [JSOOSFfZxE, UjLlLkjwWh, uBpgtwuqDG] must be rewritten first", + illegalStateException.getMessage()); + IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder(baseMetaData) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("JSOOSFfZxE").filter("{\"term\" : {\"foo\" : \"bar\"}}")) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("UjLlLkjwWh").filter("{\"term\" : {\"foo\" : \"bar1\"}}")) + .putAlias(AliasMetaData.newAliasMetaDataBuilder("uBpgtwuqDG").filter("{\"term\" : {\"foo\" : \"bar2\"}}")); + IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.EMPTY); + final long nowInMillis = randomPositiveLong(); + QueryShardContext context = new QueryShardContext( + 0, indexSettings, null, null, null, null, null, queriesRegistry, null, null, null, + () -> nowInMillis); + readRequest.rewrite(context); + QueryBuilder queryBuilder = readRequest.filteringAliases(); + assertEquals(queryBuilder, QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("foo", "bar")) + .should(QueryBuilders.termQuery("foo", "bar1")) + .should(QueryBuilders.termQuery("foo", "bar2")) + ); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(Version.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 21f15860759..1fd51966c2b 100644 --- a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -56,9 +56,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThro import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -/** - * - */ public class MoreLikeThisIT extends ESIntegTestCase { public void testSimpleMoreLikeThis() throws Exception { logger.info("Creating index test"); diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 693fffa307a..1a10a700948 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -122,6 +122,13 @@ public class MultiMatchQueryIT extends ESIntegTestCase { "last_name", "", "category", "marvel hero", "skill", 1)); + + builders.add(client().prepareIndex("test", "test", "nowHero").setSource( + "full_name", "now sort of", + "first_name", "now", + "last_name", "", + "category", "marvel hero", + "skill", 1)); List firstNames = new ArrayList<>(); fill(firstNames, "Captain", between(15, 25)); fill(firstNames, "Ultimate", between(5, 10)); @@ -164,6 +171,9 @@ public class MultiMatchQueryIT extends ESIntegTestCase { .field("norms", false) .field("copy_to", "last_name_phrase") .endObject() + .startObject("date") + .field("type", "date") + .endObject() .endObject() .endObject().endObject(); } @@ 
-633,6 +643,52 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
                         .lenient(true))).get();
         assertHitCount(searchResponse, 1L);
         assertFirstHit(searchResponse, hasId("ultimate1"));
+
+
+        // Check that cross fields works with date fields
+        searchResponse = client().prepareSearch("test")
+                .setQuery(randomizeType(multiMatchQuery("now", "f*", "date")
+                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true))
+                .get();
+        assertHitCount(searchResponse, 1L);
+        assertFirstHit(searchResponse, hasId("nowHero"));
+    }
+
+    /**
+     * Test for the edge case where field-level boosting is applied to a field that doesn't exist on documents on
+     * one shard. There was an issue reported in https://github.com/elastic/elasticsearch/issues/18710 where a
+     * `multi_match` query using the fuzziness parameter with a boost on one of two fields returned the
+     * same document score when the two documents were placed on different shards. This test recreates that scenario
+     * and checks that the returned scores are different.
+     */
+    public void testFuzzyFieldLevelBoosting() throws InterruptedException, ExecutionException {
+        String idx = "test18710";
+        CreateIndexRequestBuilder builder = prepareCreate(idx).setSettings(Settings.builder()
+                .put(indexSettings())
+                .put(SETTING_NUMBER_OF_SHARDS, 3)
+                .put(SETTING_NUMBER_OF_REPLICAS, 0)
+                );
+        assertAcked(builder.addMapping("type", "title", "type=string", "body", "type=string"));
+        ensureGreen();
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        builders.add(client().prepareIndex(idx, "type", "1").setSource(
+                "title", "foo",
+                "body", "bar"));
+        builders.add(client().prepareIndex(idx, "type", "2").setSource(
+                "title", "bar",
+                "body", "foo"));
+        indexRandom(true, false, builders);
+
+        SearchResponse searchResponse = client().prepareSearch(idx)
+                .setExplain(true)
+                .setQuery(multiMatchQuery("foo").field("title", 100).field("body")
+                        .fuzziness(0)
+                ).get();
+        SearchHit[] hits = searchResponse.getHits().getHits();
+        assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId());
+        assertEquals("1", hits[0].getId());
+        assertEquals("2", hits[1].getId());
+        assertThat(hits[0].getScore(), greaterThan(hits[1].score()));
     }

     private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) {
diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
index 5bbae82b7d7..65aa5f992e6 100644
--- a/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
+++ b/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
@@ -40,6 +40,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
+import org.elasticsearch.action.search.SearchTask;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.TestSearchContext;
@@ -54,6 +55,7 @@ public class QueryPhaseTests extends ESTestCase {
         TestSearchContext context = new TestSearchContext(null);
         context.parsedQuery(new ParsedQuery(query));
         context.setSize(0);
+        context.setTask(new SearchTask(123L, "", "", "", null));
         IndexSearcher searcher = new IndexSearcher(reader);
         final AtomicBoolean collected = new AtomicBoolean();
@@ -123,6 +125,7 @@ public class QueryPhaseTests extends ESTestCase {
         TestSearchContext context = new TestSearchContext(null);
         context.parsedQuery(new
ParsedQuery(new MatchAllDocsQuery())); context.setSize(0); + context.setTask(new SearchTask(123L, "", "", "", null)); final AtomicBoolean collected = new AtomicBoolean(); IndexSearcher contextSearcher = new IndexSearcher(new MultiReader()) { @@ -146,6 +149,7 @@ public class QueryPhaseTests extends ESTestCase { TestSearchContext context = new TestSearchContext(null); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); context.setSize(0); + context.setTask(new SearchTask(123L, "", "", "", null)); final AtomicBoolean collected = new AtomicBoolean(); IndexSearcher contextSearcher = new IndexSearcher(new MultiReader()) { diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java new file mode 100644 index 00000000000..79dea0e74b5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java @@ -0,0 +1,259 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; +import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static 
org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class QueryStringIT extends ESIntegTestCase {
+
+    @Before
+    public void setup() throws Exception {
+        String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json");
+        prepareCreate("test").setSource(indexBody).get();
+        ensureGreen("test");
+    }
+
+    private QueryStringQueryBuilder lenientQuery(String queryText) {
+        return queryStringQuery(queryText).lenient(true);
+    }
+
+    public void testBasicAllQuery() throws Exception {
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar baz"));
+        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f2", "Bar"));
+        reqs.add(client().prepareIndex("test", "doc", "3").setSource("f3", "foo bar baz"));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
+        assertHitCount(resp, 2L);
+        assertHits(resp.getHits(), "1", "3");
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("bar")).get();
+        assertHitCount(resp, 2L);
+        assertHits(resp.getHits(), "1", "3");
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("Bar")).get();
+        assertHitCount(resp, 3L);
+        assertHits(resp.getHits(), "1", "2", "3");
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("foa")).get();
+        assertHitCount(resp, 1L);
+        assertHits(resp.getHits(), "3");
+    }
+
+    public void testWithDate() throws Exception {
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo", "f_date", "2015/09/02"));
+        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar", "f_date", "2015/09/01"));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo bar")).get();
+        assertHits(resp.getHits(), "1", "2");
+        assertHitCount(resp, 2L);
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get();
+        assertHits(resp.getHits(), "1");
+        assertHitCount(resp, 1L);
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")).get();
+        assertHits(resp.getHits(), "1", "2");
+        assertHitCount(resp, 2L);
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get();
+        assertHits(resp.getHits(), "1", "2");
+        assertHitCount(resp, 2L);
+    }
+
+    public void testWithLotsOfTypes() throws Exception {
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo",
+                "f_date", "2015/09/02",
+                "f_float", "1.7",
+                "f_ip", "127.0.0.1"));
+        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar",
+                "f_date", "2015/09/01",
+                "f_float", "1.8",
+                "f_ip", "127.0.0.2"));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo bar")).get();
+        assertHits(resp.getHits(), "1", "2");
+        assertHitCount(resp, 2L);
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get();
+        assertHits(resp.getHits(), "1");
+        assertHitCount(resp, 1L);
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")).get();
+        assertHits(resp.getHits(), "1", "2");
+        assertHitCount(resp, 2L);
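+
+        // Editorial sketch, not part of the original change: query_string terms are
+        // OR'ed by default, so in the query above doc 1 matches on the date and doc 2
+        // on the IP. A hypothetical explicit spelling of the same query would be
+        //     queryStringQuery("127.0.0.2 \"2015/09/02\"").defaultOperator(Operator.OR)
+        // whereas Operator.AND would require a single document to contain both values.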
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 1.8")).get();
+        assertHits(resp.getHits(), "1", "2");
+        assertHitCount(resp, 2L);
+    }
+
+    public void testDocWithAllTypes() throws Exception {
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json");
+        reqs.add(client().prepareIndex("test", "doc", "1").setSource(docBody));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("Bar")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("Baz")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("sbaz")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("19")).get();
+        assertHits(resp.getHits(), "1");
+        // nested doesn't match because it's hidden
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("1476383971")).get();
+        assertHits(resp.getHits(), "1");
+        // bool doesn't match
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("7")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("23")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("1293")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("42")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("1.7")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("1.5")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("12.23")).get();
+        assertHits(resp.getHits(), "1");
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")).get();
+        assertHits(resp.getHits(), "1");
+        // binary doesn't match
+        // suggest doesn't match
+        // geo_point doesn't match
+        // geo_shape doesn't match
+    }
+
+    public void testKeywordWithWhitespace() throws Exception {
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f2", "Foo Bar"));
+        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar"));
+        reqs.add(client().prepareIndex("test", "doc", "3").setSource("f1", "foo bar"));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
+        assertHits(resp.getHits(), "3");
+        assertHitCount(resp, 1L);
+
+        resp = client().prepareSearch("test").setQuery(queryStringQuery("bar")).get();
+        assertHits(resp.getHits(), "2", "3");
+        assertHitCount(resp, 2L);
+
+        resp = client().prepareSearch("test")
+                .setQuery(queryStringQuery("Foo Bar").splitOnWhitespace(false))
+                .get();
+        assertHits(resp.getHits(), "1", "2", "3");
+        assertHitCount(resp, 3L);
+    }
+
+    public void testExplicitAllFieldsRequested() throws Exception {
+        String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index-with-all.json");
+        prepareCreate("test2").setSource(indexBody).get();
+        ensureGreen("test2");
+
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        reqs.add(client().prepareIndex("test2", "doc", "1").setSource("f1", "foo", "f2", "eggplant"));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test2").setQuery(
+                queryStringQuery("foo eggplent").defaultOperator(Operator.AND)).get();
+        assertHitCount(resp, 0L);
+
+        resp = client().prepareSearch("test2").setQuery(
+                queryStringQuery("foo eggplent").defaultOperator(Operator.AND).useAllFields(true)).get();
+        assertHits(resp.getHits(), "1");
+        assertHitCount(resp, 1L);
+
+        Exception e = expectThrows(Exception.class, () ->
+                client().prepareSearch("test2").setQuery(
+                        queryStringQuery("blah").field("f1").useAllFields(true)).get());
+        assertThat(ExceptionsHelper.detailedMessage(e),
+                containsString("cannot use [all_fields] parameter in conjunction with [default_field] or [fields]"));
+    }
+
+    @LuceneTestCase.AwaitsFix(bugUrl="currently can't perform phrase queries on fields that don't support positions")
+    public void testPhraseQueryOnFieldWithNoPositions() throws Exception {
+        List<IndexRequestBuilder> reqs = new ArrayList<>();
+        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar", "f4", "eggplant parmesan"));
+        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "foo bar", "f4", "chicken parmesan"));
+        indexRandom(true, false, reqs);
+
+        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("\"eggplant parmesan\"")).get();
+        assertHits(resp.getHits(), "1");
+        assertHitCount(resp, 1L);
+    }
+
+    private void assertHits(SearchHits hits, String... ids) {
+        assertThat(hits.totalHits(), equalTo((long) ids.length));
+        Set<String> hitIds = new HashSet<>();
+        for (SearchHit hit : hits.getHits()) {
+            hitIds.add(hit.id());
+        }
+        assertThat(hitIds, containsInAnyOrder(ids));
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
index 1d54b412d6c..1cb9d6508aa 100644
--- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
@@ -480,19 +480,19 @@ public class SearchQueryIT extends ESIntegTestCase {
         client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
         refresh();

-        SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*").analyzeWildcard(true)).get();
+        SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*")).get();
         assertHitCount(searchResponse, 1L);

-        searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue*").analyzeWildcard(true)).get();
+        searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue*")).get();
         assertHitCount(searchResponse, 1L);

-        searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue_1").analyzeWildcard(true)).get();
+        searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue_1")).get();
         assertHitCount(searchResponse, 1L);

-        searchResponse = client().prepareSearch().setQuery(queryStringQuery("val*e_1").analyzeWildcard(true)).get();
+        searchResponse = client().prepareSearch().setQuery(queryStringQuery("val*e_1")).get();
         assertHitCount(searchResponse, 1L);

-        searchResponse = client().prepareSearch().setQuery(queryStringQuery("v?l*e?1").analyzeWildcard(true)).get();
+        searchResponse = client().prepareSearch().setQuery(queryStringQuery("v?l*e?1")).get();
         assertHitCount(searchResponse, 1L);
     }

@@ -502,18 +502,14 @@ public class SearchQueryIT extends ESIntegTestCase {
         client().prepareIndex("test", "type1",
"1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(true)).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("ValUE_*").lowercaseExpandedTerms(true)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("ValUE_*")).get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch().setQuery(queryStringQuery("vAl*E_1")).get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); } // Issue #3540 @@ -532,16 +528,13 @@ public class SearchQueryIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]")).get(); assertHitCount(searchResponse, 1L); - try { - client().prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get(); - fail("expected SearchPhaseExecutionException (total failure)"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.toString(), containsString("unit [D] not supported for date math")); - } + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch() + .setQuery(queryStringQuery("future:[now/D TO now+2M/d]")).get()); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.toString(), containsString("unit [D] not supported for date math")); } // Issue #7880 @@ -776,12 +769,7 @@ public class SearchQueryIT extends ESIntegTestCase { searchResponse = client().prepareSearch().setQuery(matchQuery("double", "2")).get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("2")); - try { - client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - // number format exception - } + expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); } public void testMultiMatchQuery() throws Exception { @@ -1777,15 +1765,11 @@ public class SearchQueryIT extends ESIntegTestCase { refresh(); //has_child fails if executed on "simple" index - try { - client().prepareSearch("simple") - .setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get(); - fail("Should have failed as has_child query can only be executed against parent-child types"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.shardFailures().length, 
greaterThan(0)); - for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { - assertThat(shardSearchFailure.reason(), containsString("no mapping found for type [child]")); - } + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, + () -> client().prepareSearch("simple").setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get()); + assertThat(e.shardFailures().length, greaterThan(0)); + for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { + assertThat(shardSearchFailure.reason(), containsString("no mapping found for type [child]")); } //has_child doesn't get parsed for "simple" index @@ -1983,14 +1967,10 @@ public class SearchQueryIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); // When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation - try { + Exception e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00")) - .get(); - fail("A Range Filter using ms since epoch with a TimeZone should raise a ParsingException"); - } catch (SearchPhaseExecutionException e) { - // We expect it - } + .get()); searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")) @@ -2005,14 +1985,10 @@ public class SearchQueryIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); // A Range Filter on a numeric field with a TimeZone should raise an exception - try { + e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("num").from("0").to("4").timeZone("-01:00")) - .get(); - fail("A Range Filter on a numeric field with a TimeZone should raise a ParsingException"); - } catch (SearchPhaseExecutionException e) { - // We expect it - } + .get()); } public void testSearchEmptyDoc() { diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 9502a818315..60f89ab326e 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -19,25 +19,33 @@ package org.elasticsearch.search.query; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.SimpleQueryStringFlag; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; -import java.util.Locale; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; 
import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; @@ -45,6 +53,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; /** @@ -158,49 +168,6 @@ public class SimpleQueryStringIT extends ESIntegTestCase { assertSearchHits(searchResponse, "6", "7", "8"); } - public void testSimpleQueryStringLowercasing() { - createIndex("test"); - client().prepareIndex("test", "type1", "1").setSource("body", "Professional").get(); - refresh(); - - SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("Professio*")).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("Professio*").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("Professionan~1")).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("Professionan~1").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); - } - - public void testQueryStringLocale() { - createIndex("test"); - client().prepareIndex("test", "type1", "1").setSource("body", "bılly").get(); - refresh(); - - SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("BILL*")).get(); - assertHitCount(searchResponse, 0L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("body:BILL*")).get(); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("BILL*").locale(new Locale("tr", "TR"))).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - searchResponse = client().prepareSearch().setQuery( - queryStringQuery("body:BILL*").locale(new Locale("tr", "TR"))).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - } - public void testNestedFieldSimpleQueryString() throws IOException { assertAcked(prepareCreate("test") .addMapping("type1", jsonBuilder() @@ -342,7 +309,7 @@ public class SimpleQueryStringIT extends ESIntegTestCase { refresh(); SearchResponse searchResponse = client().prepareSearch() - .setQuery(simpleQueryStringQuery("Köln*").analyzeWildcard(true).field("location")).get(); + .setQuery(simpleQueryStringQuery("Köln*").field("location")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1L); 
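         // Editorial note (assumption, not stated in this patch): the explicit
         // analyzeWildcard(true) call was dropped above because wildcard terms are now
         // run through the field's analyzer by default, so "Köln*" is normalized to
         // "köln*" before term expansion and the query still matches.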
assertSearchHits(searchResponse, "1"); @@ -393,8 +360,216 @@ public class SimpleQueryStringIT extends ESIntegTestCase { refresh(); SearchResponse searchResponse = client().prepareSearch() - .setQuery(simpleQueryStringQuery("the*").analyzeWildcard(true).field("body")).get(); + .setQuery(simpleQueryStringQuery("the*").field("body")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 0L); } + + public void testBasicAllQuery() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar baz")); + reqs.add(client().prepareIndex("test", "doc", "2").setSource("f2", "Bar")); + reqs.add(client().prepareIndex("test", "doc", "3").setSource("f3", "foo bar baz")); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); + assertHitCount(resp, 2L); + assertHits(resp.getHits(), "1", "3"); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get(); + assertHitCount(resp, 2L); + assertHits(resp.getHits(), "1", "3"); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); + assertHitCount(resp, 3L); + assertHits(resp.getHits(), "1", "2", "3"); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foa")).get(); + assertHitCount(resp, 1L); + assertHits(resp.getHits(), "3"); + } + + public void testWithDate() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo", "f_date", "2015/09/02")); + reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar", "f_date", "2015/09/01")); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); + assertHits(resp.getHits(), "1", "2"); + assertHitCount(resp, 2L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get(); + assertHits(resp.getHits(), "1"); + assertHitCount(resp, 1L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")).get(); + assertHits(resp.getHits(), "1", "2"); + assertHitCount(resp, 2L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get(); + assertHits(resp.getHits(), "1", "2"); + assertHitCount(resp, 2L); + } + + public void testWithLotsOfTypes() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo", + "f_date", "2015/09/02", + "f_float", "1.7", + "f_ip", "127.0.0.1")); + reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar", + "f_date", "2015/09/01", + "f_float", "1.8", + "f_ip", "127.0.0.2")); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); + assertHits(resp.getHits(), "1", 
"2"); + assertHitCount(resp, 2L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get(); + assertHits(resp.getHits(), "1"); + assertHitCount(resp, 1L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")).get(); + assertHits(resp.getHits(), "1", "2"); + assertHitCount(resp, 2L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")).get(); + assertHits(resp.getHits(), "1", "2"); + assertHitCount(resp, 2L); + } + + public void testDocWithAllTypes() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json"); + reqs.add(client().prepareIndex("test", "doc", "1").setSource(docBody)); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("sbaz")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("19")).get(); + assertHits(resp.getHits(), "1"); + // nested doesn't match because it's hidden + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")).get(); + assertHits(resp.getHits(), "1"); + // bool doesn't match + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("7")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("23")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("1293")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("42")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("12.23")).get(); + assertHits(resp.getHits(), "1"); + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")).get(); + assertHits(resp.getHits(), "1"); + // binary doesn't match + // suggest doesn't match + // geo_point doesn't match + // geo_shape doesn't match + + resp = client().prepareSearch("test").setQuery( + simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)).get(); + assertHits(resp.getHits(), "1"); + } + + public void testKeywordWithWhitespace() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test", "doc", "1").setSource("f2", "Foo Bar")); + reqs.add(client().prepareIndex("test", "doc", 
"2").setSource("f1", "bar")); + reqs.add(client().prepareIndex("test", "doc", "3").setSource("f1", "foo bar")); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); + assertHits(resp.getHits(), "3"); + assertHitCount(resp, 1L); + + resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get(); + assertHits(resp.getHits(), "2", "3"); + assertHitCount(resp, 2L); + } + + public void testExplicitAllFieldsRequested() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index-with-all.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo", "f2", "eggplant")); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery( + simpleQueryStringQuery("foo eggplent").defaultOperator(Operator.AND)).get(); + assertHitCount(resp, 0L); + + resp = client().prepareSearch("test").setQuery( + simpleQueryStringQuery("foo eggplent").defaultOperator(Operator.AND).useAllFields(true)).get(); + assertHits(resp.getHits(), "1"); + assertHitCount(resp, 1L); + + Exception e = expectThrows(Exception.class, () -> + client().prepareSearch("test").setQuery( + simpleQueryStringQuery("blah").field("f1").useAllFields(true)).get()); + assertThat(ExceptionsHelper.detailedMessage(e), + containsString("cannot use [all_fields] parameter in conjunction with [fields]")); + } + + @LuceneTestCase.AwaitsFix(bugUrl="currently can't perform phrase queries on fields that don't support positions") + public void testPhraseQueryOnFieldWithNoPositions() throws Exception { + String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); + prepareCreate("test").setSource(indexBody).get(); + ensureGreen("test"); + + List reqs = new ArrayList<>(); + reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); + reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "foo bar", "f4", "chicken parmesan")); + indexRandom(true, false, reqs); + + SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("\"eggplant parmesan\"")).get(); + assertHits(resp.getHits(), "1"); + assertHitCount(resp, 1L); + } + + private void assertHits(SearchHits hits, String... 
ids) { + assertThat(hits.totalHits(), equalTo((long) ids.length)); + Set hitIds = new HashSet<>(); + for (SearchHit hit : hits.getHits()) { + hitIds.add(hit.id()); + } + assertThat(hitIds, containsInAnyOrder(ids)); + } } diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java index 1af4a2b1788..36e02da1f1b 100644 --- a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java @@ -25,10 +25,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -56,8 +53,7 @@ import org.junit.BeforeClass; import java.io.IOException; import static java.util.Collections.emptyList; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public class QueryRescoreBuilderTests extends ESTestCase { @@ -87,7 +83,7 @@ public class QueryRescoreBuilderTests extends ESTestCase { public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { RescoreBuilder original = randomRescoreBuilder(); - RescoreBuilder deserialized = serializedCopy(original); + RescoreBuilder deserialized = copy(original); assertEquals(deserialized, original); assertEquals(deserialized.hashCode(), original.hashCode()); assertNotSame(deserialized, original); @@ -99,34 +95,15 @@ public class QueryRescoreBuilderTests extends ESTestCase { */ public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { - RescoreBuilder firstBuilder = randomRescoreBuilder(); - assertFalse("rescore builder is equal to null", firstBuilder.equals(null)); - assertFalse("rescore builder is equal to incompatible type", firstBuilder.equals("")); - assertTrue("rescore builder is not equal to self", firstBuilder.equals(firstBuilder)); - assertThat("same rescore builder's hashcode returns different values if called multiple times", firstBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - assertThat("different rescore builder should not be equal", mutate(firstBuilder), not(equalTo(firstBuilder))); - - RescoreBuilder secondBuilder = serializedCopy(firstBuilder); - assertTrue("rescore builder is not equal to self", secondBuilder.equals(secondBuilder)); - assertTrue("rescore builder is not equal to its copy", firstBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder)); - assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - - RescoreBuilder thirdBuilder = serializedCopy(secondBuilder); - assertTrue("rescore builder is not equal to self", thirdBuilder.equals(thirdBuilder)); - assertTrue("rescore builder is not equal to its copy", 
secondBuilder.equals(thirdBuilder)); - assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("rescore builder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder)); + checkEqualsAndHashCode(randomRescoreBuilder(), this::copy, QueryRescoreBuilderTests::mutate); } } + private RescoreBuilder copy(RescoreBuilder original) throws IOException { + return copyWriteable(original, namedWriteableRegistry, + namedWriteableRegistry.getReader(RescoreBuilder.class, original.getWriteableName())); + } + /** * creates random rescorer, renders it to xContent and back to new instance that should be equal to original */ @@ -156,12 +133,13 @@ public class QueryRescoreBuilderTests extends ESTestCase { * than the test builder */ public void testBuildRescoreSearchContext() throws ElasticsearchParseException, IOException { + final long nowInMillis = randomPositiveLong(); Settings indexSettings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer - QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry, - null, null, null) { + QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, indicesQueriesRegistry, + null, null, null, () -> nowInMillis) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); @@ -281,7 +259,7 @@ public class QueryRescoreBuilderTests extends ESTestCase { } private static RescoreBuilder mutate(RescoreBuilder original) throws IOException { - RescoreBuilder mutation = serializedCopy(original); + RescoreBuilder mutation = ESTestCase.copyWriteable(original, namedWriteableRegistry, QueryRescorerBuilder::new); if (randomBoolean()) { Integer windowSize = original.windowSize(); if (windowSize != null) { @@ -338,14 +316,4 @@ public class QueryRescoreBuilderTests extends ESTestCase { } return rescorer; } - - private static RescoreBuilder serializedCopy(RescoreBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.writeNamedWriteable(original); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return in.readNamedWriteable(RescoreBuilder.class); - } - } - } - } diff --git a/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java index 6422bf7a134..02fde4d9717 100644 --- a/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; 
-import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -104,9 +104,11 @@ public class ScriptQuerySearchIT extends ESIntegTestCase { logger.info("running doc['num1'].value > 1"); SearchResponse response = client().prepareSearch() - .setQuery(scriptQuery(new Script("doc['num1'].value > 1", ScriptType.INLINE, CustomScriptPlugin.NAME, null))) + .setQuery(scriptQuery( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap()))) .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script("doc['num1'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) .get(); assertThat(response.getHits().totalHits(), equalTo(2L)); @@ -121,9 +123,10 @@ public class ScriptQuerySearchIT extends ESIntegTestCase { logger.info("running doc['num1'].value > param1"); response = client() .prepareSearch() - .setQuery(scriptQuery(new Script("doc['num1'].value > param1", ScriptType.INLINE, CustomScriptPlugin.NAME, params))) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params))) .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script("doc['num1'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) .get(); assertThat(response.getHits().totalHits(), equalTo(1L)); @@ -135,9 +138,10 @@ public class ScriptQuerySearchIT extends ESIntegTestCase { logger.info("running doc['num1'].value > param1"); response = client() .prepareSearch() - .setQuery(scriptQuery(new Script("doc['num1'].value > param1", ScriptType.INLINE, CustomScriptPlugin.NAME, params))) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params))) .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script("doc['num1'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) .get(); assertThat(response.getHits().totalHits(), equalTo(3L)); diff --git a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java index 0ece1e5d83e..f05f9206836 100644 --- a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -41,8 +41,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; -/** - */ public class DuelScrollIT extends ESIntegTestCase { public void testDuelQueryThenFetch() throws Exception { TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH); diff --git a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java index 2a42ec3530b..5b34dbc55af 100644 --- a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java
@@ -37,8 +37,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.lessThan;

-/**
- */
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
 public class SearchScrollWithFailingNodesIT extends ESIntegTestCase {
     @Override
diff --git a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java
index 967c1c68134..0c96eb15e0e 100644
--- a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java
@@ -21,10 +21,7 @@ package org.elasticsearch.search.searchafter;

 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -41,7 +38,9 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;

 import java.io.IOException;
-import static org.hamcrest.Matchers.equalTo;
+import java.util.Collections;
+
+import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;

 public class SearchAfterBuilderTests extends ESTestCase {
     private static final int NUMBER_OF_TESTBUILDERS = 20;
@@ -62,7 +61,7 @@ public class SearchAfterBuilderTests extends ESTestCase {
         indicesQueriesRegistry = null;
     }

-    private SearchAfterBuilder randomSearchFromBuilder() throws IOException {
+    private static SearchAfterBuilder randomSearchAfterBuilder() throws IOException {
         int numSearchFrom = randomIntBetween(1, 10);
         SearchAfterBuilder searchAfterBuilder = new SearchAfterBuilder();
         Object[] values = new Object[numSearchFrom];
@@ -109,7 +108,7 @@ public class SearchAfterBuilderTests extends ESTestCase {
     // ensure that every number type remains the same before/after xcontent (de)serialization.
     // This is not a problem because the final type of each field value is extracted from the associated sort field.
     // This little trick ensures that equals and hashcode are the same when using the xcontent serialization.
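    // For example (editorial illustration, not in the original source): an Integer sort
    // value such as 1 can come back from JSON parsing as a Long, so a builder round-tripped
    // through {"search_after" : [1]} would only compare equal to the original if both
    // sides take their values from the parsed JSON.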
- private SearchAfterBuilder randomJsonSearchFromBuilder() throws IOException { + private static SearchAfterBuilder randomJsonSearchFromBuilder() throws IOException { int numSearchAfter = randomIntBetween(1, 10); XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); jsonBuilder.startObject(); @@ -159,17 +158,12 @@ public class SearchAfterBuilderTests extends ESTestCase { } private static SearchAfterBuilder serializedCopy(SearchAfterBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = output.bytes().streamInput()) { - return new SearchAfterBuilder(in); - } - } + return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), SearchAfterBuilder::new); } public void testSerialization() throws Exception { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { - SearchAfterBuilder original = randomSearchFromBuilder(); + SearchAfterBuilder original = randomSearchAfterBuilder(); SearchAfterBuilder deserialized = serializedCopy(original); assertEquals(deserialized, original); assertEquals(deserialized.hashCode(), original.hashCode()); @@ -179,30 +173,8 @@ public class SearchAfterBuilderTests extends ESTestCase { public void testEqualsAndHashcode() throws Exception { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { - SearchAfterBuilder firstBuilder = randomSearchFromBuilder(); - assertFalse("searchFrom is equal to null", firstBuilder.equals(null)); - assertFalse("searchFrom is equal to incompatible type", firstBuilder.equals("")); - assertTrue("searchFrom is not equal to self", firstBuilder.equals(firstBuilder)); - assertThat("same searchFrom's hashcode returns different values if called multiple times", firstBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - - SearchAfterBuilder secondBuilder = serializedCopy(firstBuilder); - assertTrue("searchFrom is not equal to self", secondBuilder.equals(secondBuilder)); - assertTrue("searchFrom is not equal to its copy", firstBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder)); - assertThat("searchFrom copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - - SearchAfterBuilder thirdBuilder = serializedCopy(secondBuilder); - assertTrue("searchFrom is not equal to self", thirdBuilder.equals(thirdBuilder)); - assertTrue("searchFrom is not equal to its copy", secondBuilder.equals(thirdBuilder)); - assertThat("searchFrom copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("searchFrom copy's hashcode is different from original hashcode", firstBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("searchFrom is not symmetric", thirdBuilder.equals(secondBuilder)); - assertTrue("searchFrom is not symmetric", thirdBuilder.equals(firstBuilder)); + // TODO add equals tests with mutating the original object + checkEqualsAndHashCode(randomSearchAfterBuilder(), SearchAfterBuilderTests::serializedCopy); } } @@ -249,18 +221,18 @@ public class SearchAfterBuilderTests extends ESTestCase { } /** - * Explicitly tests what you can't list as a sortValue. What you can list is tested by {@link #randomSearchFromBuilder()}. + * Explicitly tests what you can't list as a sortValue. What you can list is tested by {@link #randomSearchAfterBuilder()}. 
*/ public void testBadTypes() throws IOException { randomSearchFromBuilderWithSortValueThrows(new Object()); randomSearchFromBuilderWithSortValueThrows(new GeoPoint(0, 0)); - randomSearchFromBuilderWithSortValueThrows(randomSearchFromBuilder()); + randomSearchFromBuilderWithSortValueThrows(randomSearchAfterBuilder()); randomSearchFromBuilderWithSortValueThrows(this); } - private void randomSearchFromBuilderWithSortValueThrows(Object containing) throws IOException { + private static void randomSearchFromBuilderWithSortValueThrows(Object containing) throws IOException { // Get a valid one - SearchAfterBuilder builder = randomSearchFromBuilder(); + SearchAfterBuilder builder = randomSearchAfterBuilder(); // Now replace its values with one containing the passed in object Object[] values = builder.getSortValues(); values[between(0, values.length - 1)] = containing; diff --git a/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index 695c926976b..70e3ccf968f 100644 --- a/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -25,21 +25,18 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.UidFieldMapper; @@ -53,16 +50,18 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; -import java.util.List; import java.util.ArrayList; -import java.util.Map; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -85,19 +84,23 @@ public class SliceBuilderTests extends ESTestCase { indicesQueriesRegistry = null; } - private SliceBuilder randomSliceBuilder() throws 
IOException { + private static SliceBuilder randomSliceBuilder() throws IOException { int max = randomIntBetween(2, MAX_SLICE); - int id = randomInt(max - 1); + int id = randomIntBetween(1, max - 1); String field = randomAsciiOfLengthBetween(5, 20); return new SliceBuilder(field, id, max); } private static SliceBuilder serializedCopy(SliceBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = output.bytes().streamInput()) { - return new SliceBuilder(in); - } + return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), SliceBuilder::new); + } + + private static SliceBuilder mutate(SliceBuilder original) throws IOException { + switch (randomIntBetween(0, 2)) { + case 0: return new SliceBuilder(original.getField() + "_xyz", original.getId(), original.getMax()); + case 1: return new SliceBuilder(original.getField(), original.getId() - 1, original.getMax()); + case 2: + default: return new SliceBuilder(original.getField(), original.getId(), original.getMax() + 1); } } @@ -110,29 +113,7 @@ public class SliceBuilderTests extends ESTestCase { } public void testEqualsAndHashcode() throws Exception { - SliceBuilder firstBuilder = randomSliceBuilder(); - assertFalse("sliceBuilder is equal to null", firstBuilder.equals(null)); - assertFalse("sliceBuilder is equal to incompatible type", firstBuilder.equals("")); - assertTrue("sliceBuilder is not equal to self", firstBuilder.equals(firstBuilder)); - assertThat("same searchFrom's hashcode returns different values if called multiple times", - firstBuilder.hashCode(), equalTo(firstBuilder.hashCode())); - - SliceBuilder secondBuilder = serializedCopy(firstBuilder); - assertTrue("sliceBuilder is not equal to self", secondBuilder.equals(secondBuilder)); - assertTrue("sliceBuilder is not equal to its copy", firstBuilder.equals(secondBuilder)); - assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder)); - assertThat("sliceBuilder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(firstBuilder.hashCode())); - SliceBuilder thirdBuilder = serializedCopy(secondBuilder); - assertTrue("sliceBuilder is not equal to self", thirdBuilder.equals(thirdBuilder)); - assertTrue("sliceBuilder is not equal to its copy", secondBuilder.equals(thirdBuilder)); - assertThat("sliceBuilder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("sliceBuilder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), - equalTo(thirdBuilder.hashCode())); - assertTrue("sliceBuilder is not symmetric", thirdBuilder.equals(secondBuilder)); - assertTrue("sliceBuilder is not symmetric", thirdBuilder.equals(firstBuilder)); + checkEqualsAndHashCode(randomSliceBuilder(), SliceBuilderTests::serializedCopy, SliceBuilderTests::mutate); } public void testFromXContent() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index 84f83c46e1a..006f69e4774 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -24,11 +24,8 @@ import org.apache.lucene.util.Accountable; import org.elasticsearch.Version; import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,10 +39,10 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.LegacyDoubleFieldMapper.DoubleFieldType; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; +import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.ObjectMapper.Nested; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -64,6 +61,7 @@ import org.elasticsearch.script.ScriptEngineRegistry; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptServiceTests.TestEngineService; import org.elasticsearch.script.ScriptSettings; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; @@ -78,8 +76,7 @@ import java.util.Collections; import java.util.Map; import static java.util.Collections.emptyList; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends ESTestCase { @@ -180,7 +177,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { T testsort = createTestItem(); - T deserializedsort = copyItem(testsort); + T deserializedsort = copy(testsort); assertEquals(testsort, deserializedsort); assertEquals(testsort.hashCode(), deserializedsort.hashCode()); assertNotSame(testsort, deserializedsort); @@ -192,29 +189,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST */ public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { - T firstsort = createTestItem(); - assertFalse("sort is equal to null", firstsort.equals(null)); - assertFalse("sort is equal to incompatible type", firstsort.equals("")); - assertTrue("sort is not equal to self", firstsort.equals(firstsort)); - assertThat("same sort's hashcode returns different values if called multiple times", firstsort.hashCode(), - equalTo(firstsort.hashCode())); - assertThat("different sorts should not be equal", mutate(firstsort), not(equalTo(firstsort))); - assertThat("different sorts should have different hashcode", mutate(firstsort).hashCode(), not(equalTo(firstsort.hashCode()))); - - T secondsort = copyItem(firstsort); - assertTrue("sort is not equal to self", secondsort.equals(secondsort)); - 
assertTrue("sort is not equal to its copy", firstsort.equals(secondsort)); - assertTrue("equals is not symmetric", secondsort.equals(firstsort)); - assertThat("sort copy's hashcode is different from original hashcode", secondsort.hashCode(), equalTo(firstsort.hashCode())); - - T thirdsort = copyItem(secondsort); - assertTrue("sort is not equal to self", thirdsort.equals(thirdsort)); - assertTrue("sort is not equal to its copy", secondsort.equals(thirdsort)); - assertThat("sort copy's hashcode is different from original hashcode", secondsort.hashCode(), equalTo(thirdsort.hashCode())); - assertTrue("equals is not transitive", firstsort.equals(thirdsort)); - assertThat("sort copy's hashcode is different from original hashcode", firstsort.hashCode(), equalTo(thirdsort.hashCode())); - assertTrue("equals is not symmetric", thirdsort.equals(secondsort)); - assertTrue("equals is not symmetric", thirdsort.equals(firstsort)); + checkEqualsAndHashCode(createTestItem(), this::copy, this::mutate); } } @@ -235,8 +210,9 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST public void onCache(ShardId shardId, Accountable accountable) { } }); - return new QueryShardContext(idxSettings, bitsetFilterCache, ifds, null, null, scriptService, - indicesQueriesRegistry, null, null, null) { + long nowInMillis = randomPositiveLong(); + return new QueryShardContext(0, idxSettings, bitsetFilterCache, ifds, null, null, scriptService, + indicesQueriesRegistry, null, null, null, () -> nowInMillis) { @Override public MappedFieldType fieldMapper(String name) { return provideMappedFieldType(name); @@ -245,7 +221,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST @Override public ObjectMapper getObjectMapper(String name) { BuilderContext context = new BuilderContext(this.getIndexSettings().getSettings(), new ContentPath()); - return (ObjectMapper) new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context); + return new ObjectMapper.Builder<>(name).nested(Nested.newNested(false, false)).build(context); } }; } @@ -274,12 +250,8 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST } @SuppressWarnings("unchecked") - private T copyItem(T original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return (T) namedWriteableRegistry.getReader(SortBuilder.class, original.getWriteableName()).read(in); - } - } + private T copy(T original) throws IOException { + return copyWriteable(original, namedWriteableRegistry, + (Writeable.Reader<T>) namedWriteableRegistry.getReader(SortBuilder.class, original.getWriteableName())); } } diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java index baaf3ac5d3c..408a7db2029 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; @@ -140,7 +139,7 @@ public class FieldSortBuilderTests extends
AbstractSortTestCase qHashes, List qPoints) { diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index 4dd14cc523f..6fbd473f7ea 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -20,8 +20,7 @@ package org.elasticsearch.search.sort; -import org.apache.lucene.queryparser.xml.builders.MatchAllDocsQueryBuilder; -import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.search.SortField; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseFieldMatcher; @@ -33,11 +32,12 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.LatLonPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.GeoValidationMethod; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.test.geo.RandomGeoGenerator; @@ -110,7 +110,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase plain SortField with a custom comparator + + builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1)); + builder.unit(DistanceUnit.KILOMETERS); + sort = builder.build(context); + assertEquals(SortField.class, sort.field.getClass()); // km rather than m -> plain SortField with a custom comparator + + builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1)); + builder.order(SortOrder.DESC); + sort = builder.build(context); + assertEquals(SortField.class, sort.field.getClass()); // descending means the max value should be considered rather than min + + builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1)); + builder.setNestedPath("some_nested_path"); + sort = builder.build(context); + assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with nested fields + + builder = new GeoDistanceSortBuilder("random_field_name", new GeoPoint(3.5, 2.1)); + builder.order(SortOrder.DESC); + sort = builder.build(context); + assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with DESC sorting + } } diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java index 94e0054b397..bffa1326630 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java @@ -22,13 +22,10 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentFactory; import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.DocValueFormat; -import org.junit.Rule; -import org.junit.rules.ExpectedException; import java.io.IOException; @@ -50,16 +47,12 @@ public class ScoreSortBuilderTests extends AbstractSortTestCase new ScoreSortBuilder().order(null)); + assertEquals("sort order cannot be null.", e.getMessage()); } /** @@ -93,7 +86,7 @@ public class ScoreSortBuilderTests extends AbstractSortTestCase ScriptSortType.fromString(null)); + assertEquals("input string is null", e.getMessage()); } public void testScriptSortTypeIllegalArgument() { - exceptionRule.expect(IllegalArgumentException.class); - exceptionRule.expectMessage("Unknown ScriptSortType [xyz]"); - ScriptSortType.fromString("xyz"); + Exception e = expectThrows(IllegalArgumentException.class, () -> ScriptSortType.fromString("xyz")); + assertEquals("Unknown ScriptSortType [xyz]", e.getMessage()); } public void testParseJson() throws IOException { @@ -181,7 +174,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase ScriptSortBuilder.fromXContent(context, null)); + assertEquals("[_script] unknown field [bad_field], parser not found", e.getMessage()); } public void testParseBadFieldNameExceptionsOnStartObject() throws IOException { @@ -240,9 +232,8 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase ScriptSortBuilder.fromXContent(context, null)); + assertEquals("[_script] unknown field [bad_field], parser not found", e.getMessage()); } public void testParseUnexpectedToken() throws IOException { @@ -253,9 +244,8 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase ScriptSortBuilder.fromXContent(context, null)); + assertEquals("[_script] script doesn't support values of type: START_ARRAY", e.getMessage()); } /** @@ -263,9 +253,9 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase builder.sortMode(SortMode.fromString(sortMode))); + assertEquals("script sort of type [string] doesn't support mode [" + sortMode + "]", e.getMessage()); } @Override diff --git a/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java b/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java index 24a82526eda..d73da7e5076 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SimpleSortIT.java @@ -48,7 +48,8 @@ import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.script.ScriptService.ScriptType; + +import org.elasticsearch.script.ScriptType; import static org.elasticsearch.search.sort.SortBuilders.scriptSort; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -182,7 +183,7 @@ public class SimpleSortIT extends ESIntegTestCase { // STRING script int size = 1 + random.nextInt(10); - Script script = new Script("doc['str_value'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -274,7 +275,7 @@ public class SimpleSortIT 
extends ESIntegTestCase { // test the long values SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("min", new Script("get min long", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min long", Collections.emptyMap())) .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")) .setSize(10) .get(); @@ -290,7 +291,7 @@ public class SimpleSortIT extends ESIntegTestCase { // test the double values searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("min", new Script("get min double", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min double", Collections.emptyMap())) .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")) .setSize(10) .get(); @@ -306,7 +307,7 @@ public class SimpleSortIT extends ESIntegTestCase { // test the string values searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("min", new Script("get min string", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("min", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min string", Collections.emptyMap())) .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")) .setSize(10) .get(); @@ -322,7 +323,8 @@ public class SimpleSortIT extends ESIntegTestCase { // test the geopoint values searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("min", new Script("get min geopoint lon", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("min", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get min geopoint lon", Collections.emptyMap())) .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")) .setSize(10) .get(); @@ -380,7 +382,7 @@ public class SimpleSortIT extends ESIntegTestCase { flush(); refresh(); - Script scripField = new Script("doc['id'].value", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script scripField = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['id'].value", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -397,7 +399,7 @@ public class SimpleSortIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addScriptField("id", new Script("doc['id'].values[0]", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['id'].values[0]", Collections.emptyMap())) .addSort("svalue", SortOrder.ASC) .get(); @@ -465,7 +467,7 @@ public class SimpleSortIT extends ESIntegTestCase { } refresh(); - Script sortScript = new Script("\u0027\u0027", ScriptType.INLINE, CustomScriptPlugin.NAME, null); + Script sortScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "\u0027\u0027", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .addSort(scriptSort(sortScript, ScriptSortType.STRING)) diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java index 29deb6dd76d..e0329347dba 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java @@ -20,16 +20,11 @@ package org.elasticsearch.search.sort; import org.elasticsearch.test.ESTestCase; -import org.junit.Rule; -import org.junit.rules.ExpectedException; import java.util.Locale; public class SortModeTests extends ESTestCase { - @Rule - public ExpectedException exceptionRule = ExpectedException.none(); - public void testSortMode() { // we rely on these ordinals in serialization, so changing them breaks bwc. assertEquals(0, SortMode.MIN.ordinal()); @@ -50,16 +45,11 @@ public class SortModeTests extends ESTestCase { } } - public void testParseNull() { - exceptionRule.expect(NullPointerException.class); - exceptionRule.expectMessage("input string is null"); - SortMode.fromString(null); - } + public void testParsingFromStringExceptions() { + Exception e = expectThrows(NullPointerException.class, () -> SortMode.fromString(null)); + assertEquals("input string is null", e.getMessage()); - public void testIllegalArgument() { - exceptionRule.expect(IllegalArgumentException.class); - exceptionRule.expectMessage("Unknown SortMode [xyz]"); - SortMode.fromString("xyz"); + e = expectThrows(IllegalArgumentException.class, () -> SortMode.fromString("xyz")); + assertEquals("Unknown SortMode [xyz]", e.getMessage()); } - } diff --git a/core/src/test/java/org/elasticsearch/search/stats/SearchStatsIT.java b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsIT.java index 1c296a3724a..83fb38f18a2 100644 --- a/core/src/test/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -34,7 +34,7 @@ import org.elasticsearch.index.search.stats.SearchStats.Stats; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -121,7 +121,8 @@ public class SearchStatsIT extends ESIntegTestCase { SearchResponse searchResponse = internalCluster().coordOnlyNodeClient().prepareSearch() .setQuery(QueryBuilders.termQuery("field", "value")).setStats("group1", "group2") .highlighter(new HighlightBuilder().field("field")) - .addScriptField("script1", new Script("_source.field", ScriptType.INLINE, CustomScriptPlugin.NAME, null)) + .addScriptField("script1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap())) .setSize(100) .execute().actionGet(); assertHitCount(searchResponse, docsTest1 + docsTest2); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java index d1fc671d322..4a5b24d507e 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -20,10 +20,8 @@ package org.elasticsearch.search.suggest; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; +import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -41,8 +39,7 @@ import org.junit.BeforeClass; import java.io.IOException; import static java.util.Collections.emptyList; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public abstract class AbstractSuggestionBuilderTestCase<SB extends SuggestionBuilder<SB>> extends ESTestCase { @@ -77,7 +74,7 @@ public abstract class AbstractSuggestionBuilderTestCase) namedWriteableRegistry.getReader(SuggestionBuilder.class, original.getWriteableName())); } protected static QueryParseContext newParseContext(final String xcontent) throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java index badcce9250f..4aa7e67744b 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.suggest; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -33,9 +32,9 @@ import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.suggest.completion.CompletionSuggesterBuilderTests; -import org.elasticsearch.search.suggest.completion.WritableTestCase; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilderTests; import org.elasticsearch.search.suggest.term.TermSuggestionBuilderTests; +import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -43,9 +42,11 @@ import java.io.IOException; import java.util.Map.Entry; import static java.util.Collections.emptyList; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -public class SuggestBuilderTests extends WritableTestCase<SuggestBuilder> { +public class SuggestBuilderTests extends ESTestCase { + private static final int NUMBER_OF_RUNS = 20; private static NamedWriteableRegistry namedWriteableRegistry; private static Suggesters suggesters; @@ -65,17 +66,12 @@ public class SuggestBuilderTests extends WritableTestCase<SuggestBuilder> { suggesters = null; } - @Override - protected NamedWriteableRegistry provideNamedWritableRegistry() { - return namedWriteableRegistry; - } - /** * creates random suggestion builder, renders it to xContent and back to new instance that should be equal to original */ public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { - SuggestBuilder suggestBuilder = createTestModel(); + SuggestBuilder suggestBuilder = randomSuggestBuilder(); XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { xContentBuilder.prettyPrint(); @@ -90,6 +86,30 @@ public class SuggestBuilderTests extends WritableTestCase<SuggestBuilder> { } } + /** + * Test equality and hashCode properties + */ + public void
testEqualsAndHashcode() throws IOException { + for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { + checkEqualsAndHashCode(randomSuggestBuilder(), original -> { + return copyWriteable(original, namedWriteableRegistry, SuggestBuilder::new); + }, this::createMutation); + } + } + + /** + * Test serialization and deserialization + */ + public void testSerialization() throws IOException { + for (int i = 0; i < NUMBER_OF_RUNS; i++) { + SuggestBuilder suggestBuilder = randomSuggestBuilder(); + SuggestBuilder deserializedModel = copyWriteable(suggestBuilder, namedWriteableRegistry, SuggestBuilder::new); + assertEquals(suggestBuilder, deserializedModel); + assertEquals(suggestBuilder.hashCode(), deserializedModel.hashCode()); + assertNotSame(suggestBuilder, deserializedModel); + } + } + public void testIllegalSuggestionName() { try { new SuggestBuilder().addSuggestion(null, PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder()); @@ -107,12 +127,6 @@ public class SuggestBuilderTests extends WritableTestCase<SuggestBuilder> { } } - @Override - protected SuggestBuilder createTestModel() { - return randomSuggestBuilder(); - } - - @Override protected SuggestBuilder createMutation(SuggestBuilder original) throws IOException { SuggestBuilder mutation = new SuggestBuilder().setGlobalText(original.getGlobalText()); for (Entry<String, List<SuggestionBuilder<?>>> suggestionBuilder : original.getSuggestions().entrySet()) { @@ -126,11 +140,6 @@ public class SuggestBuilderTests extends WritableTestCase<SuggestBuilder> { return mutation; } - @Override - protected SuggestBuilder readFrom(StreamInput in) throws IOException { - return new SuggestBuilder(in); - } - public static SuggestBuilder randomSuggestBuilder() { SuggestBuilder builder = new SuggestBuilder(); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java index 2a378bf1d78..4df7b9bf415 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java @@ -19,12 +19,20 @@ package org.elasticsearch.search.suggest.completion; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; -public class FuzzyOptionsTests extends WritableTestCase<FuzzyOptions> { +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + +public class FuzzyOptionsTests extends ESTestCase { + + private static final int NUMBER_OF_RUNS = 20; public static FuzzyOptions randomFuzzyOptions() { final FuzzyOptions.Builder builder = FuzzyOptions.builder(); @@ -41,49 +49,45 @@ public class FuzzyOptionsTests extends WritableTestCase<FuzzyOptions> { return builder.build(); } - @Override - protected FuzzyOptions createTestModel() { - return randomFuzzyOptions(); - } - - @Override protected FuzzyOptions createMutation(FuzzyOptions original) throws IOException { final FuzzyOptions.Builder builder = FuzzyOptions.builder(); - builder.setFuzziness(original.getEditDistance()) - .setFuzzyPrefixLength(original.getFuzzyPrefixLength()) - .setFuzzyMinLength(original.getFuzzyMinLength()) - .setMaxDeterminizedStates(original.getMaxDeterminizedStates()) - .setTranspositions(original.isTranspositions()) - 
.setUnicodeAware(original.isUnicodeAware()); - switch (randomIntBetween(0, 5)) { - case 0: - builder.setFuzziness(randomValueOtherThan(original.getEditDistance(), () -> randomFrom(0, 1, 2))); - break; - case 1: - builder.setFuzzyPrefixLength(randomValueOtherThan(original.getFuzzyPrefixLength(), () -> - randomIntBetween(1, 3))); - break; - case 2: - builder.setFuzzyMinLength(randomValueOtherThan(original.getFuzzyMinLength(), () -> - randomIntBetween(1, 3))); - break; - case 3: - builder.setMaxDeterminizedStates(randomValueOtherThan(original.getMaxDeterminizedStates(), () -> - randomIntBetween(1, 10))); - break; - case 4: - builder.setTranspositions(!original.isTranspositions()); - break; - case 5: - builder.setUnicodeAware(!original.isUnicodeAware()); - break; - } + builder.setFuzziness(original.getEditDistance()).setFuzzyPrefixLength(original.getFuzzyPrefixLength()) + .setFuzzyMinLength(original.getFuzzyMinLength()).setMaxDeterminizedStates(original.getMaxDeterminizedStates()) + .setTranspositions(original.isTranspositions()).setUnicodeAware(original.isUnicodeAware()); + List<Runnable> mutators = new ArrayList<>(); + mutators.add(() -> builder.setFuzziness(randomValueOtherThan(original.getEditDistance(), () -> randomFrom(0, 1, 2)))); + + mutators.add( + () -> builder.setFuzzyPrefixLength(randomValueOtherThan(original.getFuzzyPrefixLength(), () -> randomIntBetween(1, 3)))); + mutators.add(() -> builder.setFuzzyMinLength(randomValueOtherThan(original.getFuzzyMinLength(), () -> randomIntBetween(1, 3)))); + mutators.add(() -> builder + .setMaxDeterminizedStates(randomValueOtherThan(original.getMaxDeterminizedStates(), () -> randomIntBetween(1, 10)))); + mutators.add(() -> builder.setTranspositions(!original.isTranspositions())); + mutators.add(() -> builder.setUnicodeAware(!original.isUnicodeAware())); + randomFrom(mutators).run(); return builder.build(); } - @Override - protected FuzzyOptions readFrom(StreamInput in) throws IOException { - return new FuzzyOptions(in); + /** + * Test serialization and deserialization + */ + public void testSerialization() throws IOException { + for (int i = 0; i < NUMBER_OF_RUNS; i++) { + FuzzyOptions testModel = randomFuzzyOptions(); + FuzzyOptions deserializedModel = copyWriteable(testModel, new NamedWriteableRegistry(Collections.emptyList()), + FuzzyOptions::new); + assertEquals(testModel, deserializedModel); + assertEquals(testModel.hashCode(), deserializedModel.hashCode()); + assertNotSame(testModel, deserializedModel); + } + } + + public void testEqualsAndHashCode() throws IOException { + for (int i = 0; i < NUMBER_OF_RUNS; i++) { + checkEqualsAndHashCode(randomFuzzyOptions(), + original -> copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), FuzzyOptions::new), + this::createMutation); + } } public void testIllegalArguments() { diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java index 81df2dfd7da..a133ddf3702 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java @@ -19,12 +19,16 @@ package org.elasticsearch.search.suggest.completion; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.query.RegexpFlag; +import org.elasticsearch.test.ESTestCase; import java.io.IOException;
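The createMutation rewrite above is the mutator-list idiom that recurs throughout these tests: collect one mutation closure per property, then run exactly one chosen at random, so every iteration yields a copy that differs from the original in a single known field. A condensed sketch of the idiom, assuming builder starts out as a field-for-field copy of original, as in the FuzzyOptions code above:

// One Runnable per mutable property; applying exactly one guarantees the
// mutant differs from the original in precisely one field.
List<Runnable> mutators = new ArrayList<>();
mutators.add(() -> builder.setTranspositions(!original.isTranspositions()));
mutators.add(() -> builder.setUnicodeAware(!original.isUnicodeAware()));
randomFrom(mutators).run(); // ESTestCase picks one mutation at random
return builder.build();     // equal to original except for the mutated field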
+import java.util.Collections; -public class RegexOptionsTests extends WritableTestCase<RegexOptions> { +public class RegexOptionsTests extends ESTestCase { + + private static final int NUMBER_OF_RUNS = 20; public static RegexOptions randomRegexOptions() { final RegexOptions.Builder builder = RegexOptions.builder(); @@ -42,21 +46,24 @@ public class RegexOptionsTests extends WritableTestCase<RegexOptions> { return builder.build(); } - @Override - protected RegexOptions createTestModel() { - return randomRegexOptions(); - } - - @Override protected RegexOptions createMutation(RegexOptions original) throws IOException { final RegexOptions.Builder builder = RegexOptions.builder(); builder.setMaxDeterminizedStates(randomValueOtherThan(original.getMaxDeterminizedStates(), () -> randomIntBetween(1, 10))); return builder.build(); } - @Override - protected RegexOptions readFrom(StreamInput in) throws IOException { - return new RegexOptions(in); + /** + * Test serialization and deserialization + */ + public void testSerialization() throws IOException { + for (int i = 0; i < NUMBER_OF_RUNS; i++) { + RegexOptions testOptions = randomRegexOptions(); + RegexOptions deserializedModel = copyWriteable(testOptions, new NamedWriteableRegistry(Collections.emptyList()), + RegexOptions::new); + assertEquals(testOptions, deserializedModel); + assertEquals(testOptions.hashCode(), deserializedModel.hashCode()); + assertNotSame(testOptions, deserializedModel); + } } public void testIllegalArgument() { diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/WritableTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/WritableTestCase.java deleted file mode 100644 index 9f6b452ce20..00000000000 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/WritableTestCase.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
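WritableTestCase, whose deletion begins here, spelled out by hand the contract that EqualsHashCodeTestUtils.checkEqualsAndHashCode now enforces in one place for every Writeable under test. Schematically, given an object, a serialized copy, and a mutant, the checks amount to the following (method name illustrative):

import static org.junit.Assert.*;

// Schematic of the properties verified for an object, its copy, and a mutant.
static <T> void assertEqualsContract(T original, T copy, T mutant) {
    assertTrue(original.equals(original));                      // reflexive
    assertFalse(original.equals(null));                         // never equal to null
    assertFalse(original.equals("incompatible type"));          // never equal to a foreign type
    assertTrue(original.equals(copy) && copy.equals(original)); // symmetric with its copy
    assertEquals(original.hashCode(), copy.hashCode());         // hashCode consistent with equals
    assertFalse(original.equals(mutant));                       // a single mutation breaks equality
}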
- */ - -package org.elasticsearch.search.suggest.completion; - -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; - -/** - * Base class for testing serialization and equality for - * {@link Writeable} models - */ -public abstract class WritableTestCase<M extends Writeable> extends ESTestCase { - - protected static final int NUMBER_OF_RUNS = 20; - - /** - * create random model that is put under test - */ - protected abstract M createTestModel(); - - /** - * mutate the given model so the returned model is different - */ - protected abstract M createMutation(M original) throws IOException; - - /** - * Read from a stream. - */ - protected abstract M readFrom(StreamInput in) throws IOException; - - /** - * Test serialization and deserialization of the tested model. - */ - public void testSerialization() throws IOException { - for (int i = 0; i < NUMBER_OF_RUNS; i++) { - M testModel = createTestModel(); - M deserializedModel = copyModel(testModel); - assertEquals(testModel, deserializedModel); - assertEquals(testModel.hashCode(), deserializedModel.hashCode()); - assertNotSame(testModel, deserializedModel); - } - } - - /** - * Test equality and hashCode properties - */ - @SuppressWarnings("unchecked") - public void testEqualsAndHashcode() throws IOException { - M firstModel = createTestModel(); - String modelName = firstModel.getClass().getSimpleName(); - assertFalse(modelName + " is equal to null", firstModel.equals(null)); - assertFalse(modelName + " is equal to incompatible type", firstModel.equals("")); - assertTrue(modelName + " is not equal to self", firstModel.equals(firstModel)); - assertThat("same "+ modelName + "'s hashcode returns different values if called multiple times", firstModel.hashCode(), - equalTo(firstModel.hashCode())); - assertThat("different " + modelName + " should not be equal", createMutation(firstModel), not(equalTo(firstModel))); - - M secondModel = copyModel(firstModel); - assertTrue(modelName + " is not equal to self", secondModel.equals(secondModel)); - assertTrue(modelName + " is not equal to its copy", firstModel.equals(secondModel)); - assertTrue("equals is not symmetric", secondModel.equals(firstModel)); - assertThat(modelName + " copy's hashcode is different from original hashcode", secondModel.hashCode(), - equalTo(firstModel.hashCode())); - - M thirdModel = copyModel(secondModel); - assertTrue(modelName + " is not equal to self", thirdModel.equals(thirdModel)); - assertTrue(modelName + " is not equal to its copy", secondModel.equals(thirdModel)); - assertThat(modelName + " copy's hashcode is different from original hashcode", secondModel.hashCode(), - equalTo(thirdModel.hashCode())); - assertTrue("equals is not transitive", firstModel.equals(thirdModel)); - assertThat(modelName + " copy's hashcode is different from original hashcode", firstModel.hashCode(), - equalTo(thirdModel.hashCode())); - assertTrue(modelName + " equals is not symmetric", thirdModel.equals(secondModel)); - assertTrue(modelName + " equals is not symmetric", thirdModel.equals(firstModel)); - } - - private M copyModel(M original) throws IOException { - try 
(BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), provideNamedWritableRegistry())) { - return readFrom(in); - } - } - } - - protected NamedWriteableRegistry provideNamedWritableRegistry() { - return new NamedWriteableRegistry(Collections.emptyList()); - } -} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 846d3193f6d..aada48f79b7 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -21,8 +21,7 @@ package org.elasticsearch.search.suggest.phrase; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -35,7 +34,12 @@ import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCan import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import static org.hamcrest.Matchers.equalTo; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public class DirectCandidateGeneratorTests extends ESTestCase{ @@ -50,7 +54,7 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { DirectCandidateGeneratorBuilder original = randomCandidateGenerator(); - DirectCandidateGeneratorBuilder deserialized = serializedCopy(original); + DirectCandidateGeneratorBuilder deserialized = copy(original); assertEquals(deserialized, original); assertEquals(deserialized.hashCode(), original.hashCode()); assertNotSame(deserialized, original); @@ -62,49 +66,40 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ */ public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { - DirectCandidateGeneratorBuilder first = randomCandidateGenerator(); - assertFalse("generator is equal to null", first.equals(null)); - assertFalse("generator is equal to incompatible type", first.equals("")); - assertTrue("generator is not equal to self", first.equals(first)); - assertThat("same generator's hashcode returns different values if called multiple times", first.hashCode(), - equalTo(first.hashCode())); - - DirectCandidateGeneratorBuilder second = serializedCopy(first); - assertTrue("generator is not equal to self", second.equals(second)); - assertTrue("generator is not equal to its copy", first.equals(second)); - assertTrue("equals is not symmetric", second.equals(first)); - assertThat("generator copy's hashcode is different from original hashcode", second.hashCode(), equalTo(first.hashCode())); - - DirectCandidateGeneratorBuilder third = serializedCopy(second); - assertTrue("generator is not equal to self", third.equals(third)); - assertTrue("generator is not 
equal to its copy", second.equals(third)); - assertThat("generator copy's hashcode is different from original hashcode", second.hashCode(), equalTo(third.hashCode())); - assertTrue("equals is not transitive", first.equals(third)); - assertThat("generator copy's hashcode is different from original hashcode", first.hashCode(), equalTo(third.hashCode())); - assertTrue("equals is not symmetric", third.equals(second)); - assertTrue("equals is not symmetric", third.equals(first)); - - // test for non-equality, check that all fields are covered by changing one by one - first = new DirectCandidateGeneratorBuilder("aaa"); - assertEquals(first, serializedCopy(first)); - second = new DirectCandidateGeneratorBuilder("bbb"); - assertNotEquals(first, second); - assertNotEquals(first.accuracy(0.1f), serializedCopy(first).accuracy(0.2f)); - assertNotEquals(first.maxEdits(1), serializedCopy(first).maxEdits(2)); - assertNotEquals(first.maxInspections(1), serializedCopy(first).maxInspections(2)); - assertNotEquals(first.maxTermFreq(0.1f), serializedCopy(first).maxTermFreq(0.2f)); - assertNotEquals(first.minDocFreq(0.1f), serializedCopy(first).minDocFreq(0.2f)); - assertNotEquals(first.minWordLength(1), serializedCopy(first).minWordLength(2)); - assertNotEquals(first.postFilter("postFilter"), serializedCopy(first).postFilter("postFilter_other")); - assertNotEquals(first.preFilter("preFilter"), serializedCopy(first).preFilter("preFilter_other")); - assertNotEquals(first.prefixLength(1), serializedCopy(first).prefixLength(2)); - assertNotEquals(first.size(1), serializedCopy(first).size(2)); - assertNotEquals(first.sort("score"), serializedCopy(first).sort("frequency")); - assertNotEquals(first.stringDistance("levenstein"), serializedCopy(first).sort("ngram")); - assertNotEquals(first.suggestMode("missing"), serializedCopy(first).suggestMode("always")); + final DirectCandidateGeneratorBuilder original = randomCandidateGenerator(); + checkEqualsAndHashCode(original, DirectCandidateGeneratorTests::copy, DirectCandidateGeneratorTests::mutate); } } + private static DirectCandidateGeneratorBuilder mutate(DirectCandidateGeneratorBuilder original) throws IOException { + DirectCandidateGeneratorBuilder mutation = copy(original); + List> mutators = new ArrayList<>(); + mutators.add(() -> new DirectCandidateGeneratorBuilder(original.field() + "_other")); + mutators.add(() -> mutation.accuracy(original.accuracy() == null ? 0.1f : original.accuracy() + 0.1f)); + mutators.add(() -> { + Integer maxEdits = original.maxEdits() == null ? 1 : original.maxEdits(); + if (maxEdits == 1) { + maxEdits = 2; + } else { + maxEdits = 1; + } + return mutation.maxEdits(maxEdits); + }); + mutators.add(() -> mutation.maxInspections(original.maxInspections() == null ? 1 : original.maxInspections() + 1)); + mutators.add(() -> mutation.minWordLength(original.minWordLength() == null ? 1 : original.minWordLength() + 1)); + mutators.add(() -> mutation.prefixLength(original.prefixLength() == null ? 1 : original.prefixLength() + 1)); + mutators.add(() -> mutation.size(original.size() == null ? 1 : original.size() + 1)); + mutators.add(() -> mutation.maxTermFreq(original.maxTermFreq() == null ? 0.1f : original.maxTermFreq() + 0.1f)); + mutators.add(() -> mutation.minDocFreq(original.minDocFreq() == null ? 0.1f : original.minDocFreq() + 0.1f)); + mutators.add(() -> mutation.postFilter(original.postFilter() == null ? "postFilter" : original.postFilter() + "_other")); + mutators.add(() -> mutation.preFilter(original.preFilter() == null ? 
"preFilter" : original.preFilter() + "_other")); + mutators.add(() -> mutation.sort(original.sort() == null ? "score" : original.sort() + "_other")); + mutators.add( + () -> mutation.stringDistance(original.stringDistance() == null ? "levenstein" : original.stringDistance() + "_other")); + mutators.add(() -> mutation.suggestMode(original.suggestMode() == null ? "missing" : original.suggestMode() + "_other")); + return randomFrom(mutators).get(); + } + /** * creates random candidate generator, renders it to xContent and back to new instance that should be equal to original */ @@ -151,12 +146,12 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ // test missing fieldname String directGenerator = "{ }"; assertIllegalXContent(directGenerator, IllegalArgumentException.class, - "[direct_generator] expects exactly one field parameter, but found []"); + "Required [field]"); // test two fieldnames directGenerator = "{ \"field\" : \"f1\", \"field\" : \"f2\" }"; - assertIllegalXContent(directGenerator, IllegalArgumentException.class, - "[direct_generator] expects exactly one field parameter, but found [f2, f1]"); + assertIllegalXContent(directGenerator, ParsingException.class, + "[direct_generator] failed to parse field [field]"); // test unknown field directGenerator = "{ \"unknown_param\" : \"f1\" }"; @@ -203,12 +198,7 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ return generator; } - private static DirectCandidateGeneratorBuilder serializedCopy(DirectCandidateGeneratorBuilder original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = output.bytes().streamInput()) { - return new DirectCandidateGeneratorBuilder(in); - } - } + private static DirectCandidateGeneratorBuilder copy(DirectCandidateGeneratorBuilder original) throws IOException { + return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), DirectCandidateGeneratorBuilder::new); } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java index 4c5b3b8ca60..eab8440fa8c 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java @@ -111,7 +111,7 @@ public class PhraseSuggestionBuilderTests extends AbstractSuggestionBuilderTestC case 6: Script collateQuery = builder.collateQuery(); if (collateQuery != null) { - builder.collateQuery(randomValueOtherThan(collateQuery.getScript(), () -> randomAsciiOfLengthBetween(3, 20))); + builder.collateQuery(randomValueOtherThan(collateQuery.getIdOrCode(), () -> randomAsciiOfLengthBetween(3, 20))); } else { builder.collateQuery(randomAsciiOfLengthBetween(3, 20)); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java index c3d001b1fbc..29d7dc5e3ba 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java @@ -31,10 +31,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiFields; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.common.ParseFieldMatcher; -import 
org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -55,8 +52,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; public abstract class SmoothingModelTestCase extends ESTestCase { @@ -117,7 +113,6 @@ public abstract class SmoothingModelTestCase extends ESTestCase { */ public void testBuildWordScorer() throws IOException { SmoothingModel testModel = createTestModel(); - Map<String, Analyzer> mapping = new HashMap<>(); mapping.put("field", new WhitespaceAnalyzer()); PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping); @@ -142,7 +137,7 @@ public abstract class SmoothingModelTestCase extends ESTestCase { */ public void testSerialization() throws IOException { SmoothingModel testModel = createTestModel(); - SmoothingModel deserializedModel = copyModel(testModel); + SmoothingModel deserializedModel = copy(testModel); assertEquals(testModel, deserializedModel); assertEquals(testModel.hashCode(), deserializedModel.hashCode()); assertNotSame(testModel, deserializedModel); @@ -151,42 +146,12 @@ public abstract class SmoothingModelTestCase extends ESTestCase { /** * Test equality and hashCode properties */ - @SuppressWarnings("unchecked") public void testEqualsAndHashcode() throws IOException { - SmoothingModel firstModel = createTestModel(); - assertFalse("smoothing model is equal to null", firstModel.equals(null)); - assertFalse("smoothing model is equal to incompatible type", firstModel.equals("")); - assertTrue("smoothing model is not equal to self", firstModel.equals(firstModel)); - assertThat("same smoothing model's hashcode returns different values if called multiple times", firstModel.hashCode(), - equalTo(firstModel.hashCode())); - assertThat("different smoothing models should not be equal", createMutation(firstModel), not(equalTo(firstModel))); - - SmoothingModel secondModel = copyModel(firstModel); - assertTrue("smoothing model is not equal to self", secondModel.equals(secondModel)); - assertTrue("smoothing model is not equal to its copy", firstModel.equals(secondModel)); - assertTrue("equals is not symmetric", secondModel.equals(firstModel)); - assertThat("smoothing model copy's hashcode is different from original hashcode", secondModel.hashCode(), - equalTo(firstModel.hashCode())); - - SmoothingModel thirdModel = copyModel(secondModel); - assertTrue("smoothing model is not equal to self", thirdModel.equals(thirdModel)); - assertTrue("smoothing model is not equal to its copy", secondModel.equals(thirdModel)); - assertThat("smoothing model copy's hashcode is different from original hashcode", secondModel.hashCode(), - equalTo(thirdModel.hashCode())); - assertTrue("equals is not transitive", firstModel.equals(thirdModel)); - assertThat("smoothing model copy's hashcode is different from original hashcode", firstModel.hashCode(), - equalTo(thirdModel.hashCode())); - assertTrue("equals is not symmetric", thirdModel.equals(secondModel)); - assertTrue("equals is not symmetric", thirdModel.equals(firstModel)); + 
checkEqualsAndHashCode(createTestModel(), this::copy, this::createMutation); } - static SmoothingModel copyModel(SmoothingModel original) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - original.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - return namedWriteableRegistry.getReader(SmoothingModel.class, original.getWriteableName()).read(in); - } - } + private SmoothingModel copy(SmoothingModel original) throws IOException { + return ESTestCase.copyWriteable(original, namedWriteableRegistry, + namedWriteableRegistry.getReader(SmoothingModel.class, original.getWriteableName())); } - } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 3a045c80ac8..0e777cbd97a 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -58,7 +58,6 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.admin.cluster.RestClusterStateAction; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.snapshots.mockstore.MockRepository; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; @@ -89,11 +88,9 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -/** - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) -@ESIntegTestCase.SuppressLocalMode // TODO only restorePersistentSettingsTest needs this maybe factor out? 
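The SmoothingModelTestCase copy above has to go through the NamedWriteableRegistry because SmoothingModel is polymorphic: the reader for the concrete subclass (Laplace, StupidBackoff, LinearInterpolation) is looked up by the value of getWriteableName(). A sketch of that lookup; the registration line is illustrative of what SearchModule does, and the "laplace" name is an assumption:

// Registration maps (category class, name) -> StreamInput constructor...
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.add(new NamedWriteableRegistry.Entry(SmoothingModel.class, "laplace", Laplace::new));
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);

// ...so a copy can recover the right subclass from the stream by name.
Writeable.Reader<? extends SmoothingModel> reader =
        registry.getReader(SmoothingModel.class, original.getWriteableName());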
public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(MockRepository.Plugin.class); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3e43cc83045..b12b993a61b 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -34,10 +34,13 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexStat import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Client; @@ -51,20 +54,31 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexNameException; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.StoredScriptsIT; +import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.junit.annotations.TestLogging; import java.nio.channels.SeekableByteChannel; @@ -73,6 +87,7 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; 
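// [Editor's note] The hunk below registers IngestTestPlugin, StoredScriptsIT's
// CustomScriptPlugin and MockRepository.Plugin so the global-state snapshot test can
// cover index templates, ingest pipelines and stored scripts. Its randomization
// guarantees that at least one of the three is always exercised; a standalone sketch
// of that invariant (class name hypothetical):
import java.util.Random;

final class AtLeastOneFeatureSketch {
    public static void main(String[] args) {
        Random r = new Random();
        boolean testTemplate = r.nextBoolean();
        boolean testPipeline = r.nextBoolean();
        // Forced to true when both other flags came up false, mirroring the diff's
        // "(testTemplate == false && testPipeline == false) || randomBoolean()".
        boolean testScript = (!testTemplate && !testPipeline) || r.nextBoolean();
        assert testTemplate || testPipeline || testScript : "something must always be stored";
        System.out.printf("template=%s pipeline=%s script=%s%n", testTemplate, testPipeline, testScript);
    }
}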
@@ -83,6 +98,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -106,6 +122,14 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IngestTestPlugin.class, + StoredScriptsIT.CustomScriptPlugin.class, + MockRepository.Plugin.class); + } + public void testBasicWorkFlow() throws Exception { Client client = client(); @@ -456,11 +480,39 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.builder().put("location", location))); - logger.info("--> creating test template"); - assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", XContentFactory.jsonBuilder().startObject().startObject("test-mapping").startObject("properties") .startObject("field1").field("type", "string").field("store", "yes").endObject() .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() .endObject().endObject().endObject()).get().isAcknowledged(), equalTo(true)); + boolean testTemplate = randomBoolean(); + boolean testPipeline = randomBoolean(); + boolean testScript = (testTemplate == false && testPipeline == false) || randomBoolean(); // At least something should be stored + + if(testTemplate) { + logger.info("--> creating test template"); + assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", XContentFactory.jsonBuilder().startObject().startObject("test-mapping").startObject("properties") + .startObject("field1").field("type", "string").field("store", "yes").endObject() + .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() + .endObject().endObject().endObject()).get().isAcknowledged(), equalTo(true)); + } + + if(testPipeline) { + logger.info("--> creating test pipeline"); + BytesReference pipelineSource = jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject().bytes(); + assertAcked(client().admin().cluster().preparePutPipeline("barbaz", pipelineSource).get()); + } + + if(testScript) { + logger.info("--> creating test script"); + assertAcked(client().admin().cluster().preparePutStoredScript() + .setScriptLang(MockScriptEngine.NAME) + .setId("foobar") + .setSource(new BytesArray("{\"script\":\"1\"}"))); + } logger.info("--> snapshot without global state"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get(); @@ -474,26 +526,52 @@ public class 
SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - logger.info("--> delete test template"); - cluster().wipeTemplates("test-template"); - GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); - assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); + if (testTemplate) { + logger.info("--> delete test template"); + cluster().wipeTemplates("test-template"); + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); + } + + if (testPipeline) { + logger.info("--> delete test pipeline"); + assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("barbaz")).get()); + } + + if (testScript) { + logger.info("--> delete test script"); + assertAcked(client().admin().cluster().prepareDeleteStoredScript(MockScriptEngine.NAME, "foobar").get()); + } logger.info("--> try restoring cluster state from snapshot without global state"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); - getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); - logger.info("--> check that template is restored"); - getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); - assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); + if (testTemplate) { + logger.info("--> check that template is restored"); + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); + } + + if (testPipeline) { + logger.info("--> check that pipeline is restored"); + GetPipelineResponse getPipelineResponse = client().admin().cluster().prepareGetPipeline("barbaz").get(); + assertTrue(getPipelineResponse.isFound()); + } + + if (testScript) { + logger.info("--> check that script is restored"); + GetStoredScriptResponse getStoredScriptResponse = client().admin().cluster().prepareGetStoredScript(MockScriptEngine.NAME, "foobar").get(); + assertNotNull(getStoredScriptResponse.getStoredScript()); + } createIndex("test-idx"); ensureGreen(); @@ -511,9 +589,19 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); 
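// [Editor's sketch] The assertions above and below hinge on two switches used
// throughout this test: setIncludeGlobalState(false) when the snapshot is taken and
// setRestoreGlobalState(true) when it is restored. Shown in isolation (client and
// repository names as in the surrounding test):
//
//   client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state")
//       .setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get();
//   client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state")
//       .setWaitForCompletion(true).setRestoreGlobalState(true).get();
//
// Restoring with setRestoreGlobalState(true) from a snapshot taken without global
// state is expected to bring back no templates, pipelines or stored scripts, which is
// exactly what the template/pipeline/script checks around this point verify.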
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - logger.info("--> delete test template and index "); + logger.info("--> delete global state and index "); cluster().wipeIndices("test-idx"); - cluster().wipeTemplates("test-template"); + if (testTemplate) { + cluster().wipeTemplates("test-template"); + } + if (testPipeline) { + assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("barbaz")).get()); + } + + if (testScript) { + assertAcked(client().admin().cluster().prepareDeleteStoredScript(MockScriptEngine.NAME, "foobar").get()); + } + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); @@ -522,9 +610,11 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - logger.info("--> check that template wasn't restored but index was"); + logger.info("--> check that global state wasn't restored but index was"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); + assertFalse(client().admin().cluster().prepareGetPipeline("barbaz").get().isFound()); + assertNull(client().admin().cluster().prepareGetStoredScript(MockScriptEngine.NAME, "foobar").get().getStoredScript()); assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo(100L)); } @@ -686,6 +776,47 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas logger.info("--> total number of simulated failures during restore: [{}]", getFailureCount("test-repo")); } + public void testDataFileCorruptionDuringRestore() throws Exception { + Path repositoryLocation = randomRepoPath(); + Client client = client(); + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("fs").setSettings(Settings.builder().put("location", repositoryLocation))); + + createIndex("test-idx"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo(100L)); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards())); + + logger.info("--> update repository with mock version"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings( + Settings.builder() + .put("location", repositoryLocation) + .put("random", randomAsciiOfLength(10)) + .put("use_lucene_corruption", true) + .put("max_failure_number", 10000000L) + .put("random_data_file_io_exception_rate", 1.0))); + + // Test restore after index deletion + logger.info("--> delete index"); + 
cluster().wipeIndices("test-idx"); + logger.info("--> restore corrupt index"); + RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())); + } + public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Exception { Path repositoryLocation = randomRepoPath(); Client client = client(); @@ -2199,32 +2330,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertFalse(snapshotListener.timedOut()); // Check that cluster state update task was called only once assertEquals(1, snapshotListener.count()); - - logger.info("--> close indices"); - client.admin().indices().prepareClose("test-idx").get(); - - BlockingClusterStateListener restoreListener = new BlockingClusterStateListener(clusterService, "restore_snapshot[", "update snapshot state", Priority.HIGH); - - try { - clusterService.addFirst(restoreListener); - logger.info("--> restore snapshot"); - ListenableActionFuture futureRestore = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); - - // Await until shard updates are in pending state. - assertBusyPendingTasks("update snapshot state", numberOfShards); - restoreListener.unblock(); - - RestoreSnapshotResponse restoreSnapshotResponse = futureRestore.actionGet(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(numberOfShards)); - - } finally { - clusterService.remove(restoreListener); - } - - // Check that we didn't timeout - assertFalse(restoreListener.timedOut()); - // Check that cluster state update task was called only once - assertEquals(1, restoreListener.count()); } public void testSnapshotName() throws Exception { @@ -2398,8 +2503,28 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } refresh(); + // make sure we return only the in-progress snapshot when taking the first snapshot on a clean repository + // take initial snapshot with a block, making sure we only get 1 in-progress snapshot returned + // block a node so the create snapshot operation can remain in progress + final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName); + ListenableActionFuture responseListener = + client.admin().cluster().prepareCreateSnapshot(repositoryName, "snap-on-empty-repo") + .setWaitForCompletion(false) + .setIndices(indexName) + .execute(); + waitForBlock(initialBlockedNode, repositoryName, TimeValue.timeValueSeconds(60)); // wait for block to kick in + getSnapshotsResponse = client.admin().cluster() + .prepareGetSnapshots("test-repo") + .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo")) + .get(); + assertEquals(1, getSnapshotsResponse.getSnapshots().size()); + assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName()); + unblockNode(repositoryName, initialBlockedNode); // unblock node + responseListener.actionGet(TimeValue.timeValueMillis(10000L)); // timeout after 10 seconds + client.admin().cluster().prepareDeleteSnapshot(repositoryName, "snap-on-empty-repo").get(); + final int numSnapshots = randomIntBetween(1, 3) + 1; - logger.info("--> take {} snapshot(s)", numSnapshots); + 
logger.info("--> take {} snapshot(s)", numSnapshots - 1); final String[] snapshotNames = new String[numSnapshots]; for (int i = 0; i < numSnapshots - 1; i++) { final String snapshotName = randomAsciiOfLength(8).toLowerCase(Locale.ROOT); @@ -2431,9 +2556,19 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas logger.info("--> get all snapshots with a current in-progress"); // with ignore unavailable set to true, should not throw an exception + final List snapshotsToGet = new ArrayList<>(); + if (randomBoolean()) { + // use _current plus the individual names of the finished snapshots + snapshotsToGet.add("_current"); + for (int i = 0; i < numSnapshots - 1; i++) { + snapshotsToGet.add(snapshotNames[i]); + } + } else { + snapshotsToGet.add("_all"); + } getSnapshotsResponse = client.admin().cluster() .prepareGetSnapshots(repositoryName) - .addSnapshots("_all") + .setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY)) .get(); List sortedNames = Arrays.asList(snapshotNames); Collections.sort(sortedNames); @@ -2475,4 +2610,66 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60)); } + /** + * This test ensures that when a shard is removed from a node (perhaps due to the node + * leaving the cluster, then returning), all snapshotting of that shard is aborted, so + * all Store references held onto by the snapshot are released. + * + * See https://github.com/elastic/elasticsearch/issues/20876 + */ + public void testSnapshotCanceledOnRemovedShard() throws Exception { + final int numPrimaries = 1; + final int numReplicas = 1; + final int numDocs = 100; + final String repo = "test-repo"; + final String index = "test-idx"; + final String snapshot = "test-snap"; + + assertAcked(prepareCreate(index, 1, + Settings.builder().put("number_of_shards", numPrimaries).put("number_of_replicas", numReplicas))); + + logger.info("--> indexing some data"); + for (int i = 0; i < numDocs; i++) { + index(index, "doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + logger.info("--> creating repository"); + PutRepositoryResponse putRepositoryResponse = + client().admin().cluster().preparePutRepository(repo).setType("mock").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAsciiOfLength(10)) + .put("wait_after_unblock", 200) + ).get(); + assertTrue(putRepositoryResponse.isAcknowledged()); + + String blockedNode = blockNodeWithIndex(repo, index); + + logger.info("--> snapshot"); + client().admin().cluster().prepareCreateSnapshot(repo, snapshot) + .setWaitForCompletion(false) + .execute(); + + logger.info("--> waiting for block to kick in on node [{}]", blockedNode); + waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10)); + + logger.info("--> removing primary shard that is being snapshotted"); + ClusterState clusterState = internalCluster().clusterService(internalCluster().getMasterName()).state(); + IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index); + String nodeWithPrimary = clusterState.nodes().get(indexRoutingTable.shard(0).primaryShard().currentNodeId()).getName(); + assertNotNull("should be at least one node with a primary shard", nodeWithPrimary); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeWithPrimary); + IndexService indexService = indicesService.indexService(resolveIndex(index)); + indexService.removeShard(0, "simulate node 
removal"); + + logger.info("--> unblocking blocked node [{}]", blockedNode); + unblockNode(repo, blockedNode); + + logger.info("--> ensuring snapshot is aborted and the aborted shard was marked as failed"); + SnapshotInfo snapshotInfo = waitForCompletion(repo, snapshot, TimeValue.timeValueSeconds(10)); + assertEquals(1, snapshotInfo.shardFailures().size()); + assertEquals(0, snapshotInfo.shardFailures().get(0).shardId()); + assertEquals("IndexShardSnapshotFailedException[Aborted]", snapshotInfo.shardFailures().get(0).reason()); + } + } diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java index 1763ec1a3e0..5b4b295d408 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java @@ -26,8 +26,6 @@ import java.util.List; import static org.hamcrest.Matchers.containsInAnyOrder; -/** - */ public class SnapshotUtilsTests extends ESTestCase { public void testIndexNameFiltering() { assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{}, new String[]{"foo", "bar", "baz"}); diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index 72f17039f26..56a4a279cab 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -26,9 +26,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.Map; -/** - * - */ public class BlobContainerWrapper implements BlobContainer { private BlobContainer delegate; diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java index 5ac1e82dba4..08e0c6fdcfa 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java +++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java @@ -24,9 +24,6 @@ import org.elasticsearch.common.blobstore.BlobStore; import java.io.IOException; -/** - * - */ public class BlobStoreWrapper implements BlobStore { private BlobStore delegate; diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 06c4ec10af0..ca3aeb674bd 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -33,6 +33,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -81,6 +82,8 @@ public class MockRepository extends FsRepository { private final double randomDataFileIOExceptionRate; + private final boolean useLuceneCorruptionException; + private final long maximumNumberOfFailures; private final long waitAfterUnblock; @@ -101,6 +104,7 @@ public class MockRepository extends FsRepository { super(overrideSettings(metadata, environment), environment); randomControlIOExceptionRate = 
metadata.settings().getAsDouble("random_control_io_exception_rate", 0.0); randomDataFileIOExceptionRate = metadata.settings().getAsDouble("random_data_file_io_exception_rate", 0.0); + useLuceneCorruptionException = metadata.settings().getAsBoolean("use_lucene_corruption", false); maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnControlFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); @@ -245,7 +249,11 @@ public class MockRepository extends FsRepository { if (blobName.startsWith("__")) { if (shouldFail(blobName, randomDataFileIOExceptionRate) && (incrementAndGetFailureCount() < maximumNumberOfFailures)) { logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path()); - throw new IOException("Random IOException"); + if (useLuceneCorruptionException) { + throw new CorruptIndexException("Random corruption", "random file"); + } else { + throw new IOException("Random IOException"); + } } else if (blockOnDataFiles) { logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path()); if (blockExecution() && waitAfterUnblock > 0) { diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 20c82e6f518..e18ba0fe322 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.geo; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.vividsolutions.jts.algorithm.ConvexHull; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; @@ -61,7 +61,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { private static final ShapeType[] types = values(); public static ShapeType randomType(Random r) { - return types[RandomInts.randomIntBetween(r, 0, types.length - 1)]; + return types[RandomNumbers.randomIntBetween(r, 0, types.length - 1)]; } } @@ -115,7 +115,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { throws InvalidShapeException { if (numGeometries <= 0) { // cap geometry collection at 4 shapes (to save test time) - numGeometries = RandomInts.randomIntBetween(r, 2, 4); + numGeometries = RandomNumbers.randomIntBetween(r, 2, 4); } if (nearPoint == null) { @@ -187,7 +187,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { // for random testing having a maximum number of 10 points for a line string is more than sufficient // if this number gets out of hand, the number of self intersections for a linestring can become // (n^2-n)/2 and computing the relation intersection matrix will become NP-Hard - int numPoints = RandomInts.randomIntBetween(r, 3, 10); + int numPoints = RandomNumbers.randomIntBetween(r, 3, 10); CoordinatesBuilder coordinatesBuilder = new CoordinatesBuilder(); for (int i=0; i<numPoints; ++i) { - return RandomInts.randomInt(r, 100) >= 90; + return r.nextInt(100) >= 90; } private static Range xRandomRange(Random r, double near, Range bounds) { diff --git a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 48ea8b6c8c9..5ec0f30f520 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ 
b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -33,7 +33,7 @@ public class FixedThreadPoolTests extends ESThreadPoolTestCase { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); // some of the fixed thread pool are bound by the number of // cores so we can not exceed that - final int size = randomIntBetween(1, EsExecutors.boundedNumberOfProcessors(Settings.EMPTY)); + final int size = randomIntBetween(1, EsExecutors.numberOfProcessors(Settings.EMPTY)); final int queueSize = randomIntBetween(1, 16); final long rejections = randomIntBetween(1, 16); diff --git a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 56e18d5335e..1b5202762c6 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -49,8 +49,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; -/** - */ @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class SimpleThreadPoolIT extends ESIntegTestCase { @Override diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 87accf057ad..29053400931 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -62,7 +62,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { public void testIndexingThreadPoolsMaxSize() throws InterruptedException { final String name = randomFrom(Names.BULK, Names.INDEX); - final int maxSize = 1 + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY); + final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY); final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE); // try to create a too big thread pool @@ -89,7 +89,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { - return Math.min(size, EsExecutors.boundedNumberOfProcessors(settings)); + return Math.min(size, EsExecutors.numberOfProcessors(settings)); } else { return size; } @@ -185,7 +185,7 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { new ScalingExecutorBuilder( "my_pool1", 1, - EsExecutors.boundedNumberOfProcessors(Settings.EMPTY), + EsExecutors.numberOfProcessors(Settings.EMPTY), TimeValue.timeValueMinutes(1)); final FixedExecutorBuilder fixed = new FixedExecutorBuilder(Settings.EMPTY, "my_pool2", 1, 1); diff --git a/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java b/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java index 2fb63335873..e64a695d888 100644 --- a/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java +++ b/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampIT.java @@ -45,8 +45,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -/** - */ 
public class SimpleTimestampIT extends ESIntegTestCase { private static final Settings BW_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build(); diff --git a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 531c06f5bec..7fccc42bb79 100644 --- a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -66,7 +66,7 @@ public class TransportServiceHandshakeTests extends ESTestCase { new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(settings, Collections.emptyList())); TransportService transportService = new MockTransportService(settings, transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); DiscoveryNode node = diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 440859dce44..6121b2c0c86 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -19,6 +19,18 @@ package org.elasticsearch.tribe; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.client.Client; @@ -33,38 +45,23 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.MasterNotDiscoveredException; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; -import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.MockTcpTransportPlugin; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - import static java.util.stream.Collectors.toSet; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static 
org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -193,23 +190,13 @@ public class TribeIT extends ESIntegTestCase { settings.put(Node.NODE_DATA_SETTING.getKey(), false); settings.put(Node.NODE_MASTER_SETTING.getKey(), true); settings.put(NetworkModule.HTTP_ENABLED.getKey(), false); - settings.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT); - settings.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT); + settings.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME); doWithAllClusters(filter, c -> { String tribeSetting = "tribe." + c.getClusterName() + "."; settings.put(tribeSetting + ClusterName.CLUSTER_NAME_SETTING.getKey(), c.getClusterName()); settings.put(tribeSetting + DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "100ms"); - settings.put(tribeSetting + NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT); - settings.put(tribeSetting + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), NetworkModule.LOCAL_TRANSPORT); - - Set<String> hosts = new HashSet<>(); - for (Transport transport : c.getInstances(Transport.class)) { - TransportAddress address = transport.boundAddress().publishAddress(); - hosts.add(address.getHost() + ":" + address.getPort()); - } - settings.putArray(tribeSetting + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), - hosts.toArray(new String[hosts.size()])); + settings.put(tribeSetting + NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME); }); return settings; @@ -496,7 +483,7 @@ public class TribeIT extends ESIntegTestCase { assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().setNodes(true).get().getState(); Set<String> nodes = StreamSupport.stream(state.getNodes().spliterator(), false).map(DiscoveryNode::getName).collect(toSet()); - assertThat(nodes.containsAll(expectedNodes), is(true)); + assertThat(nodes, containsInAnyOrder(expectedNodes.toArray())); }); } diff --git a/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java b/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java index ac142fa461d..39f1c774634 100644 --- a/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java +++ b/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.sort.SortOrder; @@ -115,7 +115,7 @@ public class TimestampTTLBWIT extends ESIntegTestCase { try { client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())).execute().actionGet(); fail(); } catch (DocumentMissingException e) { // all is well @@ -127,15 +127,15 @@ public class TimestampTTLBWIT extends ESIntegTestCase { long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue(); assertThat(ttl, 
greaterThan(0L)); client().prepareUpdate(indexOrAlias(), "type1", "2") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())).execute().actionGet(); getResponse = client().prepareGet("test", "type1", "2").setStoredFields("_ttl").execute().actionGet(); ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue(); assertThat(ttl, greaterThan(0L)); // check TTL update client().prepareUpdate(indexOrAlias(), "type1", "2") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", - Collections.singletonMap("_ctx", Collections.singletonMap("_ttl", 3600000)))).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "put_values", "", + Collections.singletonMap("_ctx", Collections.singletonMap("_ttl", 3600000)))).execute().actionGet(); getResponse = client().prepareGet("test", "type1", "2").setStoredFields("_ttl").execute().actionGet(); ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue(); assertThat(ttl, greaterThan(0L)); @@ -144,8 +144,8 @@ public class TimestampTTLBWIT extends ESIntegTestCase { // check timestamp update client().prepareIndex("test", "type1", "3").setSource("field", 1).setRefreshPolicy(IMMEDIATE).get(); client().prepareUpdate(indexOrAlias(), "type1", "3") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", - Collections.singletonMap("_ctx", Collections.singletonMap("_timestamp", "2009-11-15T14:12:12")))).execute() + .setScript(new Script(ScriptType.INLINE, "put_values", "", + Collections.singletonMap("_ctx", Collections.singletonMap("_timestamp", "2009-11-15T14:12:12")))).execute() .actionGet(); getResponse = client().prepareGet("test", "type1", "3").setStoredFields("_timestamp").execute().actionGet(); long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue(); @@ -198,7 +198,7 @@ public class TimestampTTLBWIT extends ESIntegTestCase { // Update the first object and note context variables values UpdateResponse updateResponse = client().prepareUpdate("test", "subtype1", "id1") .setRouting("routing1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "extract_ctx", null)) + .setScript(new Script(ScriptType.INLINE, "extract_ctx", "", Collections.emptyMap())) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); @@ -215,7 +215,7 @@ public class TimestampTTLBWIT extends ESIntegTestCase { // Idem with the second object updateResponse = client().prepareUpdate("test", "type1", "parentId1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "extract_ctx", null)) + .setScript(new Script(ScriptType.INLINE, "extract_ctx", "", Collections.emptyMap())) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); diff --git a/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java b/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java index 31ce9861e45..59826608456 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptIT.java @@ -26,8 +26,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.NativeScriptEngineService; import org.elasticsearch.script.NativeScriptFactory; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptModule; -import org.elasticsearch.script.ScriptService; +import 
org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -42,9 +41,6 @@ import java.util.Map; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; -/** - * - */ @ClusterScope(scope= Scope.SUITE, numDataNodes =1) public class UpdateByNativeScriptIT extends ESIntegTestCase { @@ -61,7 +57,7 @@ public class UpdateByNativeScriptIT extends ESIntegTestCase { Map<String, Object> params = new HashMap<>(); params.put("foo", "SETVALUE"); client().prepareUpdate("test", "type", "1") - .setScript(new Script("custom", ScriptService.ScriptType.INLINE, NativeScriptEngineService.NAME, params)).get(); + .setScript(new Script(ScriptType.INLINE, NativeScriptEngineService.NAME, "custom", params)).get(); Map<String, Object> data = client().prepareGet("test", "type", "1").get().getSource(); assertThat(data, hasKey("foo")); diff --git a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java index fc360effb03..a3903023edf 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -44,7 +44,7 @@ import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESIntegTestCase; @@ -369,7 +369,7 @@ public class UpdateIT extends ESIntegTestCase { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .execute().actionGet(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -381,7 +381,7 @@ public class UpdateIT extends ESIntegTestCase { updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .execute().actionGet(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -410,7 +410,7 @@ public class UpdateIT extends ESIntegTestCase { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) - .setScript(new Script("", ScriptService.ScriptType.INLINE, "scripted_upsert", params)) + .setScript(new Script(ScriptType.INLINE, "scripted_upsert", "", params)) .execute().actionGet(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -424,7 +424,7 @@ public class UpdateIT extends ESIntegTestCase { updateResponse = client().prepareUpdate(indexOrAlias(), "type1", 
"1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) - .setScript(new Script("", ScriptService.ScriptType.INLINE, "scripted_upsert", params)) + .setScript(new Script(ScriptType.INLINE, "scripted_upsert", "", params)) .execute().actionGet(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -468,7 +468,7 @@ public class UpdateIT extends ESIntegTestCase { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo"))) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("extra", "foo"))) .setFetchSource(true) .execute().actionGet(); @@ -480,7 +480,7 @@ public class UpdateIT extends ESIntegTestCase { updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo"))) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("extra", "foo"))) .setFields("_source") .execute().actionGet(); @@ -498,24 +498,24 @@ public class UpdateIT extends ESIntegTestCase { index("test", "type", "1", "text", "value"); // version is now 1 assertThrows(client().prepareUpdate(indexOrAlias(), "type", "1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(2) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("text", "v2"))).setVersion(2) .execute(), VersionConflictEngineException.class); client().prepareUpdate(indexOrAlias(), "type", "1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(1).get(); + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("text", "v2"))).setVersion(1).get(); assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2L)); // and again with a higher version.. 
client().prepareUpdate(indexOrAlias(), "type", "1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v3"))).setVersion(2).get(); + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("text", "v3"))).setVersion(2).get(); assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3L)); // after delete client().prepareDelete("test", "type", "1").get(); assertThrows(client().prepareUpdate("test", "type", "1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(3) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("text", "v2"))).setVersion(3) .execute(), DocumentMissingException.class); @@ -523,7 +523,7 @@ public class UpdateIT extends ESIntegTestCase { client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get(); assertThrows(client().prepareUpdate(indexOrAlias(), "type", "2") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))).setVersion(2) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("text", "v2"))).setVersion(2) .setVersionType(VersionType.EXTERNAL).execute(), ActionRequestValidationException.class); @@ -535,7 +535,7 @@ public class UpdateIT extends ESIntegTestCase { // With internal versions, tt means "if object is there with version X, update it or explode. If it is not there, index. client().prepareUpdate(indexOrAlias(), "type", "3") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("text", "v2"))) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("text", "v2"))) .setVersion(10).setUpsert("{ \"text\": \"v0\" }").get(); get = get("test", "type", "3"); assertThat(get.getVersion(), equalTo(1L)); @@ -548,7 +548,7 @@ public class UpdateIT extends ESIntegTestCase { public void testIndexAutoCreation() throws Exception { UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("extra", "foo"))) + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("extra", "foo"))) .setFetchSource(true) .execute().actionGet(); @@ -565,7 +565,7 @@ public class UpdateIT extends ESIntegTestCase { try { client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())).execute().actionGet(); fail(); } catch (DocumentMissingException e) { // all is well @@ -574,7 +574,7 @@ public class UpdateIT extends ESIntegTestCase { client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(2L)); assertEquals(DocWriteResponse.Result.UPDATED, 
updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -587,7 +587,7 @@ public class UpdateIT extends ESIntegTestCase { Map<String, Object> params = new HashMap<>(); params.put("inc", 3); updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", params)).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", params)).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -599,7 +599,7 @@ public class UpdateIT extends ESIntegTestCase { // check noop updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("_ctx", Collections.singletonMap("op", "none")))).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("_ctx", Collections.singletonMap("op", "none")))).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -611,7 +611,7 @@ public class UpdateIT extends ESIntegTestCase { // check delete updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "put_values", Collections.singletonMap("_ctx", Collections.singletonMap("op", "delete")))).execute().actionGet(); + .setScript(new Script(ScriptType.INLINE, "put_values", "", Collections.singletonMap("_ctx", Collections.singletonMap("op", "delete")))).execute().actionGet(); assertThat(updateResponse.getVersion(), equalTo(4L)); assertEquals(DocWriteResponse.Result.DELETED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -624,7 +624,7 @@ public class UpdateIT extends ESIntegTestCase { // check fields parameter client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .setFields("field") .setFetchSource(true) .execute().actionGet(); @@ -637,7 +637,7 @@ public class UpdateIT extends ESIntegTestCase { // check _source parameter client().prepareIndex("test", "type1", "1").setSource("field1", 1, "field2", 2).execute().actionGet(); updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field1", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field1", Collections.emptyMap())) .setFetchSource("field1", "field2") .get(); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -700,7 +700,7 @@ public class UpdateIT extends ESIntegTestCase { try { client().prepareUpdate(indexOrAlias(), "type1", "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .execute().actionGet(); fail("Should have thrown ActionRequestValidationException"); } catch 
(ActionRequestValidationException e) { @@ -715,7 +715,7 @@ public class UpdateIT extends ESIntegTestCase { ensureGreen(); try { client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .setDocAsUpsert(true) .execute().actionGet(); fail("Should have thrown ActionRequestValidationException"); @@ -767,7 +767,7 @@ public class UpdateIT extends ESIntegTestCase { // Update the first object and note context variables values UpdateResponse updateResponse = client().prepareUpdate("test", "subtype1", "id1") .setRouting("routing1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "extract_ctx", null)) + .setScript(new Script(ScriptType.INLINE, "extract_ctx", "", Collections.emptyMap())) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); @@ -783,7 +783,7 @@ public class UpdateIT extends ESIntegTestCase { // Idem with the second object updateResponse = client().prepareUpdate("test", "type1", "parentId1") - .setScript(new Script("", ScriptService.ScriptType.INLINE, "extract_ctx", null)) + .setScript(new Script(ScriptType.INLINE, "extract_ctx", "", Collections.emptyMap())) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); @@ -822,13 +822,13 @@ public class UpdateIT extends ESIntegTestCase { } if (useBulkApi) { UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i)) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()); client().prepareBulk().add(updateRequestBuilder).execute().actionGet(); } else { client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i)) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) .execute().actionGet(); @@ -948,7 +948,7 @@ public class UpdateIT extends ESIntegTestCase { updateRequestsOutstanding.acquire(); try { UpdateRequest ur = client().prepareUpdate("test", "type1", Integer.toString(j)) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .setRetryOnConflict(retryOnConflict) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) .request(); @@ -1048,7 +1048,7 @@ public class UpdateIT extends ESIntegTestCase { //All the previous operations should be complete or failed at this point for (int i = 0; i < numberOfIdsPerThread; ++i) { UpdateResponse ur = client().prepareUpdate("test", "type1", Integer.toString(i)) - .setScript(new Script("field", ScriptService.ScriptType.INLINE, "field_inc", null)) + .setScript(new Script(ScriptType.INLINE, "field_inc", "field", Collections.emptyMap())) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 
diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java
index 79ee4389527..0a26ce32e6d 100644
--- a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java
@@ -49,9 +49,6 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;

-/**
- *
- */
@ClusterScope(randomDynamicTemplates = false, scope = Scope.SUITE)
public class SimpleValidateQueryIT extends ESIntegTestCase {
    public void testSimpleValidateQuery() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java
index e2c572f783a..8edcbd45a8c 100644
--- a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java
+++ b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java
@@ -32,9 +32,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

-/**
- *
- */
public class ConcurrentDocumentOperationIT extends ESIntegTestCase {
    public void testConcurrentOperationOnSameDoc() throws Exception {
        logger.info("--> create an index with 1 shard and max replicas based on nodes");
diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java
index c5d0129644a..ccb392be1fc 100644
--- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java
+++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java
@@ -21,9 +21,9 @@ package org.elasticsearch.versioning;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteResponse;
-import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.lucene.uid.Versions;
@@ -47,9 +47,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThro
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

-/**
- *
- */
public class SimpleVersioningIT extends ESIntegTestCase {
    public void testExternalVersioningInitialDelete() throws Exception {
        createIndex("test");
@@ -689,7 +686,7 @@ public class SimpleVersioningIT extends ESIntegTestCase {
        client()
                .prepareIndex("test", "type", "id")
                .setSource("foo", "bar")
-                .setOpType(IndexRequest.OpType.INDEX)
+                .setOpType(DocWriteRequest.OpType.INDEX)
                .setVersion(10)
                .setVersionType(VersionType.EXTERNAL)
                .execute()
@@ -758,7 +755,7 @@ public class SimpleVersioningIT extends ESIntegTestCase {
        client()
                .prepareIndex("test", "type", "id")
                .setSource("foo", "bar")
-                .setOpType(IndexRequest.OpType.INDEX)
+                .setOpType(DocWriteRequest.OpType.INDEX)
                .setVersion(10)
                .setVersionType(VersionType.EXTERNAL)
                .execute()
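The SimpleVersioningIT hunks swap `IndexRequest.OpType` for `DocWriteRequest.OpType`, matching the import change at the top of that file: the op-type enum now lives on the shared `DocWriteRequest` rather than on `IndexRequest`. A sketch of the updated call in context, assuming the usual `ESIntegTestCase` harness (the class and test names here are illustrative, not code from this change):

```java
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.test.ESIntegTestCase;

public class OpTypeExampleIT extends ESIntegTestCase {
    public void testExplicitOpTypeWithExternalVersion() {
        // Same call as the updated test code above, with the response captured:
        // the enum constant is now DocWriteRequest.OpType.INDEX, not IndexRequest.OpType.INDEX.
        IndexResponse response = client().prepareIndex("test", "type", "id")
                .setSource("foo", "bar")
                .setOpType(DocWriteRequest.OpType.INDEX)
                .setVersion(10)
                .setVersionType(VersionType.EXTERNAL)
                .get();
        assertEquals(10, response.getVersion());
    }
}
```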
diff --git a/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java b/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java
index 82a3a55868a..86bad44fbf1 100644
--- a/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java
@@ -29,9 +29,6 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;

-/**
- *
- */
public class ResourceWatcherServiceTests extends ESTestCase {
    public void testSettings() throws Exception {
        ThreadPool threadPool = new TestThreadPool("test");
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip b/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip
index ca3d11099ce..6d609479552 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip and b/core/src/test/resources/indices/bwc/index-2.0.0-beta1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip b/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip
index 47496a9f012..6732f715cfe 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip and b/core/src/test/resources/indices/bwc/index-2.0.0-beta2.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip b/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip
index 3b459959410..8c440725e9c 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip and b/core/src/test/resources/indices/bwc/index-2.0.0-rc1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.0.zip b/core/src/test/resources/indices/bwc/index-2.0.0.zip
index 2dae323f69e..cc0a0ae5320 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.0.0.zip and b/core/src/test/resources/indices/bwc/index-2.0.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.1.zip b/core/src/test/resources/indices/bwc/index-2.0.1.zip
index 2d0d5f42d50..81a31d18f81 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.0.1.zip and b/core/src/test/resources/indices/bwc/index-2.0.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.0.2.zip b/core/src/test/resources/indices/bwc/index-2.0.2.zip
index f6a9492b33f..63be140108c 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.0.2.zip and b/core/src/test/resources/indices/bwc/index-2.0.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.1.0.zip b/core/src/test/resources/indices/bwc/index-2.1.0.zip
index 347d9cb31e9..dff157c2ab2 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.1.0.zip and b/core/src/test/resources/indices/bwc/index-2.1.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.1.1.zip b/core/src/test/resources/indices/bwc/index-2.1.1.zip
index 6981c9af4a9..b7c408e5597 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.1.1.zip and b/core/src/test/resources/indices/bwc/index-2.1.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.1.2.zip b/core/src/test/resources/indices/bwc/index-2.1.2.zip
index 57162675b14..d6a4f9fddfa 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.1.2.zip and b/core/src/test/resources/indices/bwc/index-2.1.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.2.0.zip b/core/src/test/resources/indices/bwc/index-2.2.0.zip
index 81ff74d5abf..5c9eba5c616 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.2.0.zip and b/core/src/test/resources/indices/bwc/index-2.2.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.2.1.zip b/core/src/test/resources/indices/bwc/index-2.2.1.zip
index 7e640e4158f..3596820a44a 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.2.1.zip and b/core/src/test/resources/indices/bwc/index-2.2.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.2.2.zip b/core/src/test/resources/indices/bwc/index-2.2.2.zip
index f6c5c7653d1..788ba0712b5 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.2.2.zip and b/core/src/test/resources/indices/bwc/index-2.2.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.0.zip b/core/src/test/resources/indices/bwc/index-2.3.0.zip
index c09e5d8ba19..212d3f8c7cf 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.3.0.zip and b/core/src/test/resources/indices/bwc/index-2.3.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.1.zip b/core/src/test/resources/indices/bwc/index-2.3.1.zip
index de10f7926df..b825872bb55 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.3.1.zip and b/core/src/test/resources/indices/bwc/index-2.3.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.2.zip b/core/src/test/resources/indices/bwc/index-2.3.2.zip
index eff6c8cd156..f6b8ec502d9 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.3.2.zip and b/core/src/test/resources/indices/bwc/index-2.3.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.3.zip b/core/src/test/resources/indices/bwc/index-2.3.3.zip
index 751819741b3..e349aac5376 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.3.3.zip and b/core/src/test/resources/indices/bwc/index-2.3.3.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.4.zip b/core/src/test/resources/indices/bwc/index-2.3.4.zip
index b69f100398a..bc75ad093cf 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.3.4.zip and b/core/src/test/resources/indices/bwc/index-2.3.4.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.3.5.zip b/core/src/test/resources/indices/bwc/index-2.3.5.zip
index dd64e699954..c01af7a2062 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.3.5.zip and b/core/src/test/resources/indices/bwc/index-2.3.5.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.4.0.zip b/core/src/test/resources/indices/bwc/index-2.4.0.zip
index 14bd436b164..5055ded5f87 100644
Binary files a/core/src/test/resources/indices/bwc/index-2.4.0.zip and b/core/src/test/resources/indices/bwc/index-2.4.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-2.4.1.zip b/core/src/test/resources/indices/bwc/index-2.4.1.zip
new file mode 100644
index 00000000000..6dc29439a0f
Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.4.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/index-5.0.0.zip b/core/src/test/resources/indices/bwc/index-5.0.0.zip
new file mode 100644
index 00000000000..f8deb41276b
Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-5.0.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip
index 4a46dbc8382..b32f2a48d74 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip and b/core/src/test/resources/indices/bwc/repo-2.0.0-beta1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip
index 6e4080a9146..2b2663a4201 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip and b/core/src/test/resources/indices/bwc/repo-2.0.0-beta2.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip b/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip
index deb36fee119..59b31f5cc3b 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip and b/core/src/test/resources/indices/bwc/repo-2.0.0-rc1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.0.zip b/core/src/test/resources/indices/bwc/repo-2.0.0.zip
index 8042696cb90..ae6668be1cf 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.0.0.zip and b/core/src/test/resources/indices/bwc/repo-2.0.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.1.zip b/core/src/test/resources/indices/bwc/repo-2.0.1.zip
index 6e9b3d0aede..c675125226c 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.0.1.zip and b/core/src/test/resources/indices/bwc/repo-2.0.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.0.2.zip b/core/src/test/resources/indices/bwc/repo-2.0.2.zip
index 4dd61b0f26a..2e1062c294b 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.0.2.zip and b/core/src/test/resources/indices/bwc/repo-2.0.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.1.0.zip b/core/src/test/resources/indices/bwc/repo-2.1.0.zip
index b641e0b5bba..fdaf6321421 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.1.0.zip and b/core/src/test/resources/indices/bwc/repo-2.1.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.1.1.zip b/core/src/test/resources/indices/bwc/repo-2.1.1.zip
index e08cde10b33..e29cf401628 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.1.1.zip and b/core/src/test/resources/indices/bwc/repo-2.1.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.1.2.zip b/core/src/test/resources/indices/bwc/repo-2.1.2.zip
index f9829c219f0..f1c371720c5 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.1.2.zip and b/core/src/test/resources/indices/bwc/repo-2.1.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.2.0.zip b/core/src/test/resources/indices/bwc/repo-2.2.0.zip
index 703184dac1e..1501c1942ad 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.2.0.zip and b/core/src/test/resources/indices/bwc/repo-2.2.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.2.1.zip b/core/src/test/resources/indices/bwc/repo-2.2.1.zip
index c665f79c11c..93e39514c3c 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.2.1.zip and b/core/src/test/resources/indices/bwc/repo-2.2.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.2.2.zip b/core/src/test/resources/indices/bwc/repo-2.2.2.zip
index 9e5e6fdd30d..5c937a3c6c1 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.2.2.zip and b/core/src/test/resources/indices/bwc/repo-2.2.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.0.zip b/core/src/test/resources/indices/bwc/repo-2.3.0.zip
index f41df41224d..575232e09df 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.3.0.zip and b/core/src/test/resources/indices/bwc/repo-2.3.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.1.zip b/core/src/test/resources/indices/bwc/repo-2.3.1.zip
index 78e736986ab..f0434a446f0 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.3.1.zip and b/core/src/test/resources/indices/bwc/repo-2.3.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.2.zip b/core/src/test/resources/indices/bwc/repo-2.3.2.zip
index b160856326a..c5ca8a34325 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.3.2.zip and b/core/src/test/resources/indices/bwc/repo-2.3.2.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.3.zip b/core/src/test/resources/indices/bwc/repo-2.3.3.zip
index 411cbea5a22..4310f8e1efb 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.3.3.zip and b/core/src/test/resources/indices/bwc/repo-2.3.3.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.4.zip b/core/src/test/resources/indices/bwc/repo-2.3.4.zip
index 4afa60f7c78..6abccc237b6 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.3.4.zip and b/core/src/test/resources/indices/bwc/repo-2.3.4.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.3.5.zip b/core/src/test/resources/indices/bwc/repo-2.3.5.zip
index 5d2d00de961..f80ed069180 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.3.5.zip and b/core/src/test/resources/indices/bwc/repo-2.3.5.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.4.0.zip b/core/src/test/resources/indices/bwc/repo-2.4.0.zip
index c5f3c0d0759..09591e1d7ed 100644
Binary files a/core/src/test/resources/indices/bwc/repo-2.4.0.zip and b/core/src/test/resources/indices/bwc/repo-2.4.0.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-2.4.1.zip b/core/src/test/resources/indices/bwc/repo-2.4.1.zip
new file mode 100644
index 00000000000..2c88ecebe31
Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.4.1.zip differ
diff --git a/core/src/test/resources/indices/bwc/repo-5.0.0.zip b/core/src/test/resources/indices/bwc/repo-5.0.0.zip
new file mode 100644
index 00000000000..e873b9bc9a8
Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-5.0.0.zip differ
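The remaining additions are test fixtures: real-world index templates from Filebeat, Logstash, Metricbeat, and Packetbeat, checked in as JSON resources under `core/src/test/resources/org/elasticsearch/action/admin/indices/template/`. A hedged sketch of how a test might read one of them off the classpath; this helper is hypothetical and not part of the change itself:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

// Hypothetical helper (not in this diff): loads a template fixture as a string so a
// test can hand it to the put-template API or to the template parsing code under test.
public final class TemplateFixtures {

    private TemplateFixtures() {}

    public static String load(String fileName) throws IOException {
        String path = "/org/elasticsearch/action/admin/indices/template/" + fileName;
        try (InputStream in = TemplateFixtures.class.getResourceAsStream(path)) {
            if (in == null) {
                throw new IOException("missing test resource: " + path);
            }
            // Java 8 compatible read-fully loop.
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            byte[] buffer = new byte[4096];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read);
            }
            return new String(out.toByteArray(), StandardCharsets.UTF_8);
        }
    }
}
```

For example, `TemplateFixtures.load("filebeat-5.0.template.json")` would return the first of the four templates below.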
diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json
new file mode 100644
index 00000000000..3b6e373b556
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json
@@ -0,0 +1,69 @@
+{
+  "mappings": {
+    "_default_": {
+      "_all": {
+        "norms": false
+      },
+      "_meta": {
+        "version": "5.0.0-beta1"
+      },
+      "dynamic_templates": [
+        {
+          "fields": {
+            "mapping": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            },
+            "match_mapping_type": "string",
+            "path_match": "fields.*"
+          }
+        }
+      ],
+      "properties": {
+        "@timestamp": {
+          "type": "date"
+        },
+        "beat": {
+          "properties": {
+            "hostname": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            },
+            "name": {
+              "ignore_above": 1024,
+              "type": "keyword"
+            }
+          }
+        },
+        "input_type": {
+          "ignore_above": 1024,
+          "type": "keyword"
+        },
+        "message": {
+          "norms": false,
+          "type": "text"
+        },
+        "offset": {
+          "type": "long"
+        },
+        "source": {
+          "ignore_above": 1024,
+          "type": "keyword"
+        },
+        "tags": {
+          "ignore_above": 1024,
+          "type": "keyword"
+        },
+        "type": {
+          "ignore_above": 1024,
+          "type": "keyword"
+        }
+      }
+    }
+  },
+  "order": 0,
+  "settings": {
+    "index.refresh_interval": "5s"
+  },
+  "template": "filebeat-*"
+}
diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json
new file mode 100644
index
00000000000..fc6406c457a --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/logstash-5.0.template.json @@ -0,0 +1,45 @@ +{ + "template" : "logstash-*", + "settings" : { + "index.refresh_interval" : "5s" + }, + "mappings" : { + "_default_" : { + "_all" : {"enabled" : true, "norms" : false}, + "dynamic_templates" : [ { + "message_field" : { + "match" : "message", + "match_mapping_type" : "string", + "mapping" : { + "type" : "string", "index" : "analyzed", "norms" : false, + "fielddata" : { "format" : "disabled" } + } + } + }, { + "string_fields" : { + "match" : "*", + "match_mapping_type" : "string", + "mapping" : { + "type" : "text", "norms" : false, + "fields" : { + "keyword" : { "type": "keyword" } + } + } + } + } ], + "properties" : { + "@timestamp": { "type": "date", "include_in_all": false }, + "@version": { "type": "keyword", "include_in_all": false }, + "geoip" : { + "dynamic": true, + "properties" : { + "ip": { "type": "ip" }, + "location" : { "type" : "geo_point" }, + "latitude" : { "type" : "half_float" }, + "longitude" : { "type" : "half_float" } + } + } + } + } + } +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json new file mode 100644 index 00000000000..57786288db6 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json @@ -0,0 +1,2658 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "apache": { + "properties": { + "status": { + "properties": { + "bytes_per_request": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "bytes_per_sec": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "connections": { + "properties": { + "async": { + "properties": { + "closing": { + "type": "long" + }, + "keep_alive": { + "type": "long" + }, + "writing": { + "type": "long" + } + } + }, + "total": { + "type": "long" + } + } + }, + "cpu": { + "properties": { + "children_system": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "children_user": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "load": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "system": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "user": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "load": { + "properties": { + "1": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "15": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "5": { + "scaling_factor": 100, + "type": "scaled_float" + } + } + }, + "requests_per_sec": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "scoreboard": { + "properties": { + "closing_connection": { + "type": "long" + }, + "dns_lookup": { + "type": "long" + }, + "gracefully_finishing": { + "type": "long" + }, + "idle_cleanup": { + "type": "long" + }, + "keepalive": { + "type": "long" + }, + "logging": { + "type": "long" + }, + "open_slot": { + "type": "long" + }, + "reading_request": { + "type": "long" + }, + "sending_reply": { + "type": "long" + }, + 
"starting_up": { + "type": "long" + }, + "total": { + "type": "long" + }, + "waiting_for_connection": { + "type": "long" + } + } + }, + "total_accesses": { + "type": "long" + }, + "total_kbytes": { + "type": "long" + }, + "uptime": { + "properties": { + "server_uptime": { + "type": "long" + }, + "uptime": { + "type": "long" + } + } + }, + "workers": { + "properties": { + "busy": { + "type": "long" + }, + "idle": { + "type": "long" + } + } + } + } + } + } + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "haproxy": { + "properties": { + "info": { + "properties": { + "compress": { + "properties": { + "bps": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "rate_limit": { + "type": "long" + } + } + } + } + }, + "conn": { + "properties": { + "rate": { + "properties": { + "limit": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "cum": { + "properties": { + "conns": { + "type": "long" + }, + "req": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + } + } + }, + "curr": { + "properties": { + "conns": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + } + } + }, + "idle_pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "max": { + "properties": { + "conn": { + "properties": { + "rate": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "hard_conn": { + "type": "long" + }, + "pipes": { + "type": "long" + }, + "sess_rate": { + "type": "long" + }, + "sock": { + "type": "long" + }, + "ssl": { + "properties": { + "conns": { + "type": "long" + }, + "rate": { + "type": "long" + } + } + }, + "zlib_mem_usage": { + "type": "long" + } + } + }, + "mem_max_bytes": { + "type": "long" + }, + "nb_proc": { + "type": "long" + }, + "pid": { + "type": "long" + }, + "pipes": { + "properties": { + "free": { + "type": "long" + }, + "used": { + "type": "long" + } + } + }, + "process_num": { + "type": "long" + }, + "run_queue": { + "type": "long" + }, + "sess": { + "properties": { + "rate": { + "properties": { + "limit": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "ssl": { + "properties": { + "backend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + } + } + }, + "cache_misses": { + "type": "long" + }, + "cached_lookups": { + "type": "long" + }, + "frontend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + }, + "session_reuse_pct": { + "type": "long" + } + } + }, + "rate": { + "properties": { + "limit": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "tasks": { + "type": "long" + }, + "ulimit_n": { + "type": "long" + }, + "uptime_sec": { + "type": "long" + }, + "zlib_mem_usage": { + "type": "long" + } + } + }, + "stat": { + "properties": { + "act": { + "type": "long" + }, + "bck": { + "type": "long" + }, + "bin": { + "type": "long" + }, + "bout": { + "type": "long" + }, + "check": { + "properties": { + "code": { + "type": "long" + }, + "duration": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "chkdown": { + "type": "long" + }, + "chkfail": { + "type": "long" + }, + "cli_abrt": { + "type": "long" + }, + "comp": { + "properties": { + "byp": { + "type": "long" + }, + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "rsp": { + "type": "long" + } + } + }, + 
"component_type": { + "type": "long" + }, + "ctime": { + "type": "long" + }, + "downtime": { + "type": "long" + }, + "dreq": { + "type": "long" + }, + "dresp": { + "type": "long" + }, + "econ": { + "type": "long" + }, + "ereq": { + "type": "long" + }, + "eresp": { + "type": "long" + }, + "hanafail": { + "type": "long" + }, + "hrsp": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + }, + "other": { + "type": "long" + } + } + }, + "iid": { + "type": "long" + }, + "last": { + "properties": { + "agt": { + "ignore_above": 1024, + "type": "keyword" + }, + "chk": { + "ignore_above": 1024, + "type": "keyword" + }, + "sess": { + "type": "long" + } + } + }, + "lastchg": { + "type": "long" + }, + "lbtot": { + "type": "long" + }, + "pid": { + "type": "long" + }, + "pxname": { + "ignore_above": 1024, + "type": "keyword" + }, + "qcur": { + "type": "long" + }, + "qlimit": { + "type": "long" + }, + "qmax": { + "type": "long" + }, + "qtime": { + "type": "long" + }, + "rate": { + "properties": { + "lim": { + "type": "long" + }, + "max": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "req": { + "properties": { + "rate": { + "properties": { + "max": { + "type": "long" + }, + "value": { + "type": "long" + } + } + }, + "tot": { + "type": "long" + } + } + }, + "rtime": { + "type": "long" + }, + "scur": { + "type": "long" + }, + "sid": { + "type": "long" + }, + "slim": { + "type": "long" + }, + "smax": { + "type": "long" + }, + "srv_abrt": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "stot": { + "ignore_above": 1024, + "type": "keyword" + }, + "svname": { + "ignore_above": 1024, + "type": "keyword" + }, + "throttle": { + "type": "long" + }, + "tracked": { + "type": "long" + }, + "ttime": { + "type": "long" + }, + "weight": { + "type": "long" + }, + "wredis": { + "type": "long" + }, + "wretr": { + "type": "long" + } + } + } + } + }, + "metricset": { + "properties": { + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "module": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "rtt": { + "type": "long" + } + } + }, + "mongodb": { + "properties": { + "status": { + "properties": { + "asserts": { + "properties": { + "msg": { + "type": "long" + }, + "regular": { + "type": "long" + }, + "rollovers": { + "type": "long" + }, + "user": { + "type": "long" + }, + "warning": { + "type": "long" + } + } + }, + "background_flushing": { + "properties": { + "average": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "flushes": { + "type": "long" + }, + "last": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "last_finished": { + "type": "date" + }, + "total": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "connections": { + "properties": { + "available": { + "type": "long" + }, + "current": { + "type": "long" + }, + "total_created": { + "type": "long" + } + } + }, + "extra_info": { + "properties": { + "heap_usage": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "page_faults": { + "type": "long" + } + } + }, + "journaling": { + "properties": { + "commits": { + "type": "long" + }, + "commits_in_write_lock": { + "type": "long" + }, + "compression": { + "type": "long" + }, + "early_commits": { + "type": "long" + }, + "journaled": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "times": 
{ + "properties": { + "commits": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "commits_in_write_lock": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "dt": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "prep_log_buffer": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "remap_private_view": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "write_to_data_files": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "write_to_journal": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "write_to_data_files": { + "properties": { + "mb": { + "type": "long" + } + } + } + } + }, + "local_time": { + "type": "date" + }, + "memory": { + "properties": { + "bits": { + "type": "long" + }, + "mapped": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "mapped_with_journal": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "resident": { + "properties": { + "mb": { + "type": "long" + } + } + }, + "virtual": { + "properties": { + "mb": { + "type": "long" + } + } + } + } + }, + "network": { + "properties": { + "in": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "out": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "requests": { + "type": "long" + } + } + }, + "opcounters": { + "properties": { + "command": { + "type": "long" + }, + "delete": { + "type": "long" + }, + "getmore": { + "type": "long" + }, + "insert": { + "type": "long" + }, + "query": { + "type": "long" + }, + "update": { + "type": "long" + } + } + }, + "opcounters_replicated": { + "properties": { + "command": { + "type": "long" + }, + "delete": { + "type": "long" + }, + "getmore": { + "type": "long" + }, + "insert": { + "type": "long" + }, + "query": { + "type": "long" + }, + "update": { + "type": "long" + } + } + }, + "storage_engine": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "uptime": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "write_backs_queued": { + "type": "boolean" + } + } + } + } + }, + "mysql": { + "properties": { + "status": { + "properties": { + "aborted": { + "properties": { + "clients": { + "type": "long" + }, + "connects": { + "type": "long" + } + } + }, + "binlog": { + "properties": { + "cache": { + "properties": { + "disk_use": { + "type": "long" + }, + "use": { + "type": "long" + } + } + } + } + }, + "bytes": { + "properties": { + "received": { + "type": "long" + }, + "sent": { + "type": "long" + } + } + }, + "connections": { + "type": "long" + }, + "created": { + "properties": { + "tmp": { + "properties": { + "disk_tables": { + "type": "long" + }, + "files": { + "type": "long" + }, + "tables": { + "type": "long" + } + } + } + } + }, + "delayed": { + "properties": { + "errors": { + "type": "long" + }, + "insert_threads": { + "type": "long" + }, + "writes": { + "type": "long" + } + } + }, + "flush_commands": { + "type": "long" + }, + "max_used_connections": { + "type": "long" + }, + "open": { + "properties": { + "files": { + "type": "long" + }, + "streams": { + "type": "long" + }, + "tables": { + "type": "long" + } + } + }, + "opened_tables": { + "type": "long" + }, + "threads": { + "properties": { + "cached": { + "type": "long" + }, + "connected": { + "type": "long" + }, + "created": { + "type": "long" + }, + "running": { + "type": "long" + } + } + } + } + } + } + }, + "nginx": { + "properties": { + "stubstatus": { + "properties": { + 
"accepts": { + "type": "long" + }, + "active": { + "type": "long" + }, + "current": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "handled": { + "type": "long" + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "reading": { + "type": "long" + }, + "requests": { + "type": "long" + }, + "waiting": { + "type": "long" + }, + "writing": { + "type": "long" + } + } + } + } + }, + "postgresql": { + "properties": { + "activity": { + "properties": { + "application_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "backend_start": { + "type": "date" + }, + "client": { + "properties": { + "address": { + "ignore_above": 1024, + "type": "keyword" + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "port": { + "type": "long" + } + } + }, + "database": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "oid": { + "type": "long" + } + } + }, + "pid": { + "type": "long" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + }, + "query_start": { + "type": "date" + }, + "state": { + "ignore_above": 1024, + "type": "keyword" + }, + "state_change": { + "type": "date" + }, + "transaction_start": { + "type": "date" + }, + "user": { + "properties": { + "id": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "waiting": { + "type": "boolean" + } + } + }, + "bgwriter": { + "properties": { + "buffers": { + "properties": { + "allocated": { + "type": "long" + }, + "backend": { + "type": "long" + }, + "backend_fsync": { + "type": "long" + }, + "checkpoints": { + "type": "long" + }, + "clean": { + "type": "long" + }, + "clean_full": { + "type": "long" + } + } + }, + "checkpoints": { + "properties": { + "requested": { + "type": "long" + }, + "scheduled": { + "type": "long" + }, + "times": { + "properties": { + "sync": { + "properties": { + "ms": { + "type": "float" + } + } + }, + "write": { + "properties": { + "ms": { + "type": "float" + } + } + } + } + } + } + }, + "stats_reset": { + "type": "date" + } + } + }, + "database": { + "properties": { + "blocks": { + "properties": { + "hit": { + "type": "long" + }, + "read": { + "type": "long" + }, + "time": { + "properties": { + "read": { + "properties": { + "ms": { + "type": "long" + } + } + }, + "write": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + } + } + }, + "conflicts": { + "type": "long" + }, + "deadlocks": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "number_of_backends": { + "type": "long" + }, + "oid": { + "type": "long" + }, + "rows": { + "properties": { + "deleted": { + "type": "long" + }, + "fetched": { + "type": "long" + }, + "inserted": { + "type": "long" + }, + "returned": { + "type": "long" + }, + "updated": { + "type": "long" + } + } + }, + "stats_reset": { + "type": "date" + }, + "temporary": { + "properties": { + "bytes": { + "type": "long" + }, + "files": { + "type": "long" + } + } + }, + "transactions": { + "properties": { + "commit": { + "type": "long" + }, + "rollback": { + "type": "long" + } + } + } + } + } + } + }, + "redis": { + "properties": { + "info": { + "properties": { + "clients": { + "properties": { + "biggest_input_buf": { + "type": "long" + }, + "blocked": { + "type": "long" + }, + "connected": { + "type": "long" + }, + "longest_output_list": { + "type": "long" + } + } + }, + "cluster": { + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "cpu": { + "properties": { + "used": { + "properties": { + "sys": { + 
"scaling_factor": 1000, + "type": "scaled_float" + }, + "sys_children": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "user": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "user_children": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "memory": { + "properties": { + "allocator": { + "ignore_above": 1024, + "type": "keyword" + }, + "used": { + "properties": { + "lua": { + "type": "long" + }, + "peak": { + "type": "long" + }, + "rss": { + "type": "long" + }, + "value": { + "type": "long" + } + } + } + } + }, + "persistence": { + "properties": { + "aof": { + "properties": { + "bgrewrite": { + "properties": { + "last_status": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "enabled": { + "type": "boolean" + }, + "rewrite": { + "properties": { + "current_time": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "in_progress": { + "type": "boolean" + }, + "last_time": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "scheduled": { + "type": "boolean" + } + } + }, + "write": { + "properties": { + "last_status": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "loading": { + "type": "boolean" + }, + "rdb": { + "properties": { + "bgsave": { + "properties": { + "current_time": { + "properties": { + "sec": { + "type": "long" + } + } + }, + "in_progress": { + "type": "boolean" + }, + "last_status": { + "ignore_above": 1024, + "type": "keyword" + }, + "last_time": { + "properties": { + "sec": { + "type": "long" + } + } + } + } + }, + "last_save": { + "properties": { + "changes_since": { + "type": "long" + }, + "time": { + "type": "long" + } + } + } + } + } + } + }, + "replication": { + "properties": { + "backlog": { + "properties": { + "active": { + "type": "long" + }, + "first_byte_offset": { + "type": "long" + }, + "histlen": { + "type": "long" + }, + "size": { + "type": "long" + } + } + }, + "connected_slaves": { + "type": "long" + }, + "master_offset": { + "type": "long" + }, + "role": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "server": { + "properties": { + "arch_bits": { + "ignore_above": 1024, + "type": "keyword" + }, + "build_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "config_file": { + "ignore_above": 1024, + "type": "keyword" + }, + "gcc_version": { + "ignore_above": 1024, + "type": "keyword" + }, + "git_dirty": { + "ignore_above": 1024, + "type": "keyword" + }, + "git_sha1": { + "ignore_above": 1024, + "type": "keyword" + }, + "hz": { + "type": "long" + }, + "lru_clock": { + "type": "long" + }, + "mode": { + "ignore_above": 1024, + "type": "keyword" + }, + "multiplexing_api": { + "ignore_above": 1024, + "type": "keyword" + }, + "os": { + "ignore_above": 1024, + "type": "keyword" + }, + "process_id": { + "type": "long" + }, + "run_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "tcp_port": { + "type": "long" + }, + "uptime": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "stats": { + "properties": { + "commands_processed": { + "type": "long" + }, + "connections": { + "properties": { + "received": { + "type": "long" + }, + "rejected": { + "type": "long" + } + } + }, + "instantaneous": { + "properties": { + "input_kbps": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ops_per_sec": { + "type": "long" + }, + "output_kbps": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "keys": { + "properties": { + "evicted": { + "type": "long" + }, + "expired": { + 
"type": "long" + } + } + }, + "keyspace": { + "properties": { + "hits": { + "type": "long" + }, + "misses": { + "type": "long" + } + } + }, + "latest_fork_usec": { + "type": "long" + }, + "migrate_cached_sockets": { + "type": "long" + }, + "net": { + "properties": { + "input": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "output": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + }, + "pubsub": { + "properties": { + "channels": { + "type": "long" + }, + "patterns": { + "type": "long" + } + } + }, + "sync": { + "properties": { + "full": { + "type": "long" + }, + "partial": { + "properties": { + "err": { + "type": "long" + }, + "ok": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "keyspace": { + "properties": { + "avg_ttl": { + "type": "long" + }, + "expires": { + "type": "long" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "keys": { + "type": "long" + } + } + } + } + }, + "system": { + "properties": { + "core": { + "properties": { + "id": { + "type": "long" + }, + "idle": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "iowait": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "irq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "nice": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "softirq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "steal": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "system": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "user": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + } + } + }, + "cpu": { + "properties": { + "idle": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "iowait": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "irq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "nice": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "softirq": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "steal": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "system": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "user": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + }, + "ticks": { + "type": "long" + } + } + } + } + }, + "diskio": { + "properties": { + "io": { + "properties": { + "time": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "read": { + "properties": { + "bytes": { + 
"type": "long" + }, + "count": { + "type": "long" + }, + "time": { + "type": "long" + } + } + }, + "serial_number": { + "ignore_above": 1024, + "type": "keyword" + }, + "write": { + "properties": { + "bytes": { + "type": "long" + }, + "count": { + "type": "long" + }, + "time": { + "type": "long" + } + } + } + } + }, + "filesystem": { + "properties": { + "available": { + "type": "long" + }, + "device_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "files": { + "type": "long" + }, + "free": { + "type": "long" + }, + "free_files": { + "type": "long" + }, + "mount_point": { + "ignore_above": 1024, + "type": "keyword" + }, + "total": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "fsstat": { + "properties": { + "count": { + "type": "long" + }, + "total_files": { + "type": "long" + }, + "total_size": { + "properties": { + "free": { + "type": "long" + }, + "total": { + "type": "long" + }, + "used": { + "type": "long" + } + } + } + } + }, + "load": { + "properties": { + "1": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "15": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "5": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "norm": { + "properties": { + "1": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "15": { + "scaling_factor": 100, + "type": "scaled_float" + }, + "5": { + "scaling_factor": 100, + "type": "scaled_float" + } + } + } + } + }, + "memory": { + "properties": { + "actual": { + "properties": { + "free": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "free": { + "type": "long" + }, + "swap": { + "properties": { + "free": { + "type": "long" + }, + "total": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "total": { + "type": "long" + }, + "used": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + } + } + }, + "network": { + "properties": { + "in": { + "properties": { + "bytes": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "errors": { + "type": "long" + }, + "packets": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "out": { + "properties": { + "bytes": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "errors": { + "type": "long" + }, + "packets": { + "type": "long" + } + } + } + } + }, + "process": { + "properties": { + "cgroup": { + "properties": { + "blkio": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "total": { + "properties": { + "bytes": { + "type": "long" + }, + "ios": { + "type": "long" + } + } + } + } + }, + "cpu": { + "properties": { + "cfs": { + "properties": { + "period": { + "properties": { + "us": { + "type": "long" + } + } + }, + "quota": { + "properties": { + "us": { + "type": "long" + } + } + }, + "shares": { + "type": "long" + } + } + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "rt": { + "properties": { + "period": { + "properties": { + "us": { + "type": "long" + } + } + }, + "runtime": { + "properties": { + 
"us": { + "type": "long" + } + } + } + } + }, + "stats": { + "properties": { + "periods": { + "type": "long" + }, + "throttled": { + "properties": { + "ns": { + "type": "long" + }, + "periods": { + "type": "long" + } + } + } + } + } + } + }, + "cpuacct": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "system": { + "properties": { + "ns": { + "type": "long" + } + } + }, + "user": { + "properties": { + "ns": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": { + "ns": { + "type": "long" + } + } + } + } + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "memory": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "kmem": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "kmem_tcp": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "mem": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "memsw": { + "properties": { + "failures": { + "type": "long" + }, + "limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long" + }, + "max": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "active_anon": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "active_file": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "cache": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "hierarchical_memory_limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "hierarchical_memsw_limit": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "inactive_anon": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "inactive_file": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "major_page_faults": { + "type": "long" + }, + "mapped_file": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "page_faults": { + "type": "long" + }, + "pages_in": { + "type": "long" + }, + "pages_out": { + "type": "long" + }, + "rss": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "rss_huge": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "swap": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "unevictable": { + "properties": { + "bytes": { + "type": "long" + } + } + } + } + } + } + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "cmdline": { + "ignore_above": 1024, + "type": "keyword" + }, + "cpu": { + "properties": { + "start_time": { + "type": "date" + }, + "system": { + "type": "long" + }, + "total": { + "properties": { + "pct": { + "scaling_factor": 1000, + "type": 
"scaled_float" + }, + "ticks": { + "type": "long" + } + } + }, + "user": { + "type": "long" + } + } + }, + "fd": { + "properties": { + "limit": { + "properties": { + "hard": { + "type": "long" + }, + "soft": { + "type": "long" + } + } + }, + "open": { + "type": "long" + } + } + }, + "memory": { + "properties": { + "rss": { + "properties": { + "bytes": { + "type": "long" + }, + "pct": { + "scaling_factor": 1000, + "type": "scaled_float" + } + } + }, + "share": { + "type": "long" + }, + "size": { + "type": "long" + } + } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "pgid": { + "type": "long" + }, + "pid": { + "type": "long" + }, + "ppid": { + "type": "long" + }, + "state": { + "ignore_above": 1024, + "type": "keyword" + }, + "username": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "zookeeper": { + "properties": { + "mntr": { + "properties": { + "approximate_data_size": { + "type": "long" + }, + "ephemerals_count": { + "type": "long" + }, + "followers": { + "type": "long" + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "latency": { + "properties": { + "avg": { + "type": "long" + }, + "max": { + "type": "long" + }, + "min": { + "type": "long" + } + } + }, + "max_file_descriptor_count": { + "type": "long" + }, + "num_alive_connections": { + "type": "long" + }, + "open_file_descriptor_count": { + "type": "long" + }, + "outstanding_requests": { + "type": "long" + }, + "packets": { + "properties": { + "received": { + "type": "long" + }, + "sent": { + "type": "long" + } + } + }, + "pending_syncs": { + "type": "long" + }, + "server_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "synced_followers": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "watch_count": { + "type": "long" + }, + "znode_count": { + "type": "long" + } + } + } + } + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "metricbeat-*" +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json new file mode 100644 index 00000000000..22b889176aa --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json @@ -0,0 +1,1360 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + }, + { + "amqp.headers": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "amqp.headers.*" + } + }, + { + "cassandra.response.supported": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "cassandra.response.supported.*" + } + }, + { + "http.request.headers": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "http.request.headers.*" + } + }, + { + "http.response.headers": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "http.response.headers.*" + } + } + ], + 
"properties": { + "@timestamp": { + "type": "date" + }, + "amqp": { + "properties": { + "app-id": { + "ignore_above": 1024, + "type": "keyword" + }, + "auto-delete": { + "type": "boolean" + }, + "class-id": { + "type": "long" + }, + "consumer-count": { + "type": "long" + }, + "consumer-tag": { + "ignore_above": 1024, + "type": "keyword" + }, + "content-encoding": { + "ignore_above": 1024, + "type": "keyword" + }, + "content-type": { + "ignore_above": 1024, + "type": "keyword" + }, + "correlation-id": { + "ignore_above": 1024, + "type": "keyword" + }, + "delivery-mode": { + "ignore_above": 1024, + "type": "keyword" + }, + "delivery-tag": { + "type": "long" + }, + "durable": { + "type": "boolean" + }, + "exchange": { + "ignore_above": 1024, + "type": "keyword" + }, + "exchange-type": { + "ignore_above": 1024, + "type": "keyword" + }, + "exclusive": { + "type": "boolean" + }, + "expiration": { + "ignore_above": 1024, + "type": "keyword" + }, + "if-empty": { + "type": "boolean" + }, + "if-unused": { + "type": "boolean" + }, + "immediate": { + "type": "boolean" + }, + "mandatory": { + "type": "boolean" + }, + "message-count": { + "type": "long" + }, + "message-id": { + "ignore_above": 1024, + "type": "keyword" + }, + "method-id": { + "type": "long" + }, + "multiple": { + "type": "boolean" + }, + "no-ack": { + "type": "boolean" + }, + "no-local": { + "type": "boolean" + }, + "no-wait": { + "type": "boolean" + }, + "passive": { + "type": "boolean" + }, + "priority": { + "type": "long" + }, + "queue": { + "ignore_above": 1024, + "type": "keyword" + }, + "redelivered": { + "type": "boolean" + }, + "reply-code": { + "type": "long" + }, + "reply-text": { + "ignore_above": 1024, + "type": "keyword" + }, + "reply-to": { + "ignore_above": 1024, + "type": "keyword" + }, + "routing-key": { + "ignore_above": 1024, + "type": "keyword" + }, + "timestamp": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "user-id": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "bytes_in": { + "type": "long" + }, + "bytes_out": { + "type": "long" + }, + "cassandra": { + "properties": { + "request": { + "properties": { + "headers": { + "properties": { + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "length": { + "type": "long" + }, + "op": { + "ignore_above": 1024, + "type": "keyword" + }, + "stream": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "type": "long" + } + } + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "response": { + "properties": { + "authentication": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "error": { + "properties": { + "code": { + "type": "long" + }, + "details": { + "properties": { + "alive": { + "type": "long" + }, + "arg_types": { + "ignore_above": 1024, + "type": "keyword" + }, + "blockfor": { + "type": "long" + }, + "data_present": { + "type": "boolean" + }, + "function": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "num_failures": { + "ignore_above": 1024, + "type": "keyword" + }, + "read_consistency": { + "ignore_above": 1024, + "type": "keyword" + }, + "received": { + "type": "long" + }, + "required": { + "type": "long" + }, + "stmt_id": { + "ignore_above": 1024, + "type": 
"keyword" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + }, + "write_type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "msg": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "event": { + "properties": { + "change": { + "ignore_above": 1024, + "type": "keyword" + }, + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "schema_change": { + "properties": { + "args": { + "ignore_above": 1024, + "type": "keyword" + }, + "change": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "object": { + "ignore_above": 1024, + "type": "keyword" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + }, + "target": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "headers": { + "properties": { + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "length": { + "type": "long" + }, + "op": { + "ignore_above": 1024, + "type": "keyword" + }, + "stream": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "type": "long" + } + } + }, + "result": { + "properties": { + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "prepared": { + "properties": { + "prepared_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "req_meta": { + "properties": { + "col_count": { + "type": "long" + }, + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "paging_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "pkey_columns": { + "type": "long" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "resp_meta": { + "properties": { + "col_count": { + "type": "long" + }, + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "paging_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "pkey_columns": { + "type": "long" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "rows": { + "properties": { + "meta": { + "properties": { + "col_count": { + "type": "long" + }, + "flags": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "paging_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "pkey_columns": { + "type": "long" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "num_rows": { + "type": "long" + } + } + }, + "schema_change": { + "properties": { + "args": { + "ignore_above": 1024, + "type": "keyword" + }, + "change": { + "ignore_above": 1024, + "type": "keyword" + }, + "keyspace": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "object": { + "ignore_above": 1024, + "type": "keyword" + }, + "table": { + "ignore_above": 1024, + "type": "keyword" + }, + "target": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "warnings": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "client_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_location": { + "type": "geo_point" + }, + "client_port": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_proc": { + 
"ignore_above": 1024, + "type": "keyword" + }, + "client_server": { + "ignore_above": 1024, + "type": "keyword" + }, + "client_service": { + "ignore_above": 1024, + "type": "keyword" + }, + "connection_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "connecttime": { + "type": "long" + }, + "cpu_time": { + "type": "long" + }, + "dest": { + "properties": { + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip_location": { + "type": "geo_point" + }, + "ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "ipv6_location": { + "type": "geo_point" + }, + "mac": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip_location": { + "type": "geo_point" + }, + "outer_ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ipv6_location": { + "type": "geo_point" + }, + "port": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "net_bytes_total": { + "type": "long" + }, + "net_packets_total": { + "type": "long" + } + } + } + } + }, + "direction": { + "ignore_above": 1024, + "type": "keyword" + }, + "dns": { + "properties": { + "additionals": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "data": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "ttl": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "additionals_count": { + "type": "long" + }, + "answers": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "data": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "ttl": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "answers_count": { + "type": "long" + }, + "authorities": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "authorities_count": { + "type": "long" + }, + "flags": { + "properties": { + "authentic_data": { + "type": "boolean" + }, + "authoritative": { + "type": "boolean" + }, + "checking_disabled": { + "type": "boolean" + }, + "recursion_available": { + "type": "boolean" + }, + "recursion_desired": { + "type": "boolean" + }, + "truncated_response": { + "type": "boolean" + } + } + }, + "id": { + "type": "long" + }, + "op_code": { + "ignore_above": 1024, + "type": "keyword" + }, + "opt": { + "properties": { + "do": { + "type": "boolean" + }, + "ext_rcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "udp_size": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "question": { + "properties": { + "class": { + "ignore_above": 1024, + "type": "keyword" + }, + "etld_plus_one": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "response_code": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "dnstime": { + "type": "long" + }, + "domloadtime": { + "type": "long" + }, + "final": { + "ignore_above": 1024, + "type": "keyword" + }, + "flow_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "http": { + "properties": { + "request": { + "properties": { + "body": { + "norms": false, + "type": "text" + }, + 
"params": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "response": { + "properties": { + "body": { + "ignore_above": 1024, + "type": "keyword" + }, + "code": { + "ignore_above": 1024, + "type": "keyword" + }, + "phrase": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "icmp": { + "properties": { + "request": { + "properties": { + "code": { + "type": "long" + }, + "message": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "type": "long" + } + } + }, + "response": { + "properties": { + "code": { + "type": "long" + }, + "message": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "type": "long" + } + } + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "icmp_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "last_time": { + "type": "date" + }, + "loadtime": { + "type": "long" + }, + "memcache": { + "properties": { + "protocol_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "request": { + "properties": { + "automove": { + "ignore_above": 1024, + "type": "keyword" + }, + "bytes": { + "type": "long" + }, + "cas_unique": { + "type": "long" + }, + "command": { + "ignore_above": 1024, + "type": "keyword" + }, + "count_values": { + "type": "long" + }, + "delta": { + "type": "long" + }, + "dest_class": { + "type": "long" + }, + "exptime": { + "type": "long" + }, + "flags": { + "type": "long" + }, + "initial": { + "type": "long" + }, + "line": { + "ignore_above": 1024, + "type": "keyword" + }, + "noreply": { + "type": "boolean" + }, + "opaque": { + "type": "long" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "opcode_value": { + "type": "long" + }, + "quiet": { + "type": "boolean" + }, + "raw_args": { + "ignore_above": 1024, + "type": "keyword" + }, + "sleep_us": { + "type": "long" + }, + "source_class": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "vbucket": { + "type": "long" + }, + "verbosity": { + "type": "long" + } + } + }, + "response": { + "properties": { + "bytes": { + "type": "long" + }, + "cas_unique": { + "type": "long" + }, + "command": { + "ignore_above": 1024, + "type": "keyword" + }, + "count_values": { + "type": "long" + }, + "error_msg": { + "ignore_above": 1024, + "type": "keyword" + }, + "flags": { + "type": "long" + }, + "opaque": { + "type": "long" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "opcode_value": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "status_code": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "value": { + "type": "long" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "method": { + "ignore_above": 1024, + "type": "keyword" + }, + "mongodb": { + "properties": { + "cursorId": { + "ignore_above": 1024, + "type": "keyword" + }, + "error": { + "ignore_above": 1024, + "type": "keyword" + }, + "fullCollectionName": { + "ignore_above": 1024, + "type": "keyword" + }, + "numberReturned": { + "type": "long" + }, + "numberToReturn": { + "type": "long" + }, + "numberToSkip": { + "type": "long" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + }, + "returnFieldsSelector": { + "ignore_above": 1024, + "type": "keyword" + }, + "selector": { + "ignore_above": 1024, + "type": "keyword" + }, + "startingFrom": { + "ignore_above": 1024, + "type": "keyword" + }, + "update": { + 
"ignore_above": 1024, + "type": "keyword" + } + } + }, + "mysql": { + "properties": { + "affected_rows": { + "type": "long" + }, + "error_code": { + "type": "long" + }, + "error_message": { + "ignore_above": 1024, + "type": "keyword" + }, + "insert_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "iserror": { + "type": "boolean" + }, + "num_fields": { + "ignore_above": 1024, + "type": "keyword" + }, + "num_rows": { + "ignore_above": 1024, + "type": "keyword" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "nfs": { + "properties": { + "minor_version": { + "type": "long" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "tag": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "type": "long" + } + } + }, + "notes": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_vlan": { + "ignore_above": 1024, + "type": "keyword" + }, + "params": { + "norms": false, + "type": "text" + }, + "path": { + "ignore_above": 1024, + "type": "keyword" + }, + "pgsql": { + "properties": { + "error_code": { + "type": "long" + }, + "error_message": { + "ignore_above": 1024, + "type": "keyword" + }, + "error_severity": { + "ignore_above": 1024, + "type": "keyword" + }, + "iserror": { + "type": "boolean" + }, + "num_fields": { + "ignore_above": 1024, + "type": "keyword" + }, + "num_rows": { + "ignore_above": 1024, + "type": "keyword" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "port": { + "ignore_above": 1024, + "type": "keyword" + }, + "proc": { + "ignore_above": 1024, + "type": "keyword" + }, + "query": { + "ignore_above": 1024, + "type": "keyword" + }, + "real_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "redis": { + "properties": { + "error": { + "ignore_above": 1024, + "type": "keyword" + }, + "return_value": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "release": { + "ignore_above": 1024, + "type": "keyword" + }, + "request": { + "norms": false, + "type": "text" + }, + "resource": { + "ignore_above": 1024, + "type": "keyword" + }, + "response": { + "norms": false, + "type": "text" + }, + "responsetime": { + "type": "long" + }, + "rpc": { + "properties": { + "auth_flavor": { + "ignore_above": 1024, + "type": "keyword" + }, + "call_size": { + "type": "long" + }, + "cred": { + "properties": { + "gid": { + "type": "long" + }, + "gids": { + "ignore_above": 1024, + "type": "keyword" + }, + "machinename": { + "ignore_above": 1024, + "type": "keyword" + }, + "stamp": { + "type": "long" + }, + "uid": { + "type": "long" + } + } + }, + "reply_size": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "time": { + "type": "long" + }, + "time_str": { + "ignore_above": 1024, + "type": "keyword" + }, + "xid": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "server": { + "ignore_above": 1024, + "type": "keyword" + }, + "service": { + "ignore_above": 1024, + "type": "keyword" + }, + "source": { + "properties": { + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip_location": { + "type": "geo_point" + }, + "ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + "ipv6_location": { + "type": "geo_point" + }, + "mac": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "outer_ip_location": { + "type": "geo_point" + }, + "outer_ipv6": { + "ignore_above": 1024, + "type": "keyword" + }, + 
"outer_ipv6_location": { + "type": "geo_point" + }, + "port": { + "ignore_above": 1024, + "type": "keyword" + }, + "stats": { + "properties": { + "net_bytes_total": { + "type": "long" + }, + "net_packets_total": { + "type": "long" + } + } + } + } + }, + "start_time": { + "type": "date" + }, + "status": { + "ignore_above": 1024, + "type": "keyword" + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "thrift": { + "properties": { + "exceptions": { + "ignore_above": 1024, + "type": "keyword" + }, + "params": { + "ignore_above": 1024, + "type": "keyword" + }, + "return_value": { + "ignore_above": 1024, + "type": "keyword" + }, + "service": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "transport": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "vlan": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "packetbeat-*" +} diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json new file mode 100644 index 00000000000..017a3b0c326 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json @@ -0,0 +1,162 @@ +{ + "mappings": { + "_default_": { + "_all": { + "norms": false + }, + "_meta": { + "version": "5.0.0-beta1" + }, + "dynamic_templates": [ + { + "fields": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "fields.*" + } + }, + { + "event_data": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "event_data.*" + } + }, + { + "user_data": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string", + "path_match": "user_data.*" + } + } + ], + "properties": { + "@timestamp": { + "type": "date" + }, + "activity_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "computer_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "event_id": { + "type": "long" + }, + "keywords": { + "ignore_above": 1024, + "type": "keyword" + }, + "level": { + "ignore_above": 1024, + "type": "keyword" + }, + "log_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "message_error": { + "ignore_above": 1024, + "type": "keyword" + }, + "opcode": { + "ignore_above": 1024, + "type": "keyword" + }, + "process_id": { + "type": "long" + }, + "provider_guid": { + "ignore_above": 1024, + "type": "keyword" + }, + "record_number": { + "ignore_above": 1024, + "type": "keyword" + }, + "related_activity_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "source_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "task": { + "ignore_above": 1024, + "type": "keyword" + }, + "thread_id": { + "type": "long" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "user": { + "properties": { + "domain": { + "ignore_above": 1024, + "type": "keyword" + }, + "identifier": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 
1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "version": { + "type": "long" + }, + "xml": { + "norms": false, + "type": "text" + } + } + } + }, + "order": 0, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "winlogbeat-*" +} diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json b/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json new file mode 100644 index 00000000000..9e4d04930a7 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json @@ -0,0 +1,36 @@ +{ + "f1": "foo", + "f2": "Bar", + "f3": "foo bar baz", + "f_multi": "Foo Bar Baz", + "f_object": { + "sub1": "sfoo", + "sub2":"sbar", + "sub3":19 + }, + "f_nested": { + "nest1": "nfoo", + "nest2":"nbar", + "nest3":21 + }, + "f_date": "1476383971", + "f_bool": "true", + "f_byte": "7", + "f_short": "23", + "f_int": "1293", + "f_long": "42", + "f_float": "1.7", + "f_hfloat": "1.5", + "f_sfloat": "12.23", + "f_ip": "127.0.0.1", + "f_binary": "VGhpcyBpcyBzb21lIGJpbmFyeSBkYXRhCg==", + "f_suggest": { + "input": ["Nevermind", "Nirvana"], + "weight": 34 + }, + "f_geop": "41.12,-71.34", + "f_geos": { + "type": "point", + "coordinates": [-77.03653, 38.897676] + } +} diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json b/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json new file mode 100644 index 00000000000..05de13b4261 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json @@ -0,0 +1,35 @@ +{ + "settings": { + "index": { + "number_of_shards": 1, + "number_of_replicas": 0, + "analysis": { + "analyzer": { + "my_ngrams": { + "type": "custom", + "tokenizer": "standard", + "filter": ["my_ngrams"] + } + }, + "filter": { + "my_ngrams": { + "type": "ngram", + "min_gram": 2, + "max_gram": 2 + } + } + } + } + }, + "mappings": { + "doc": { + "_all": { + "enabled": true + }, + "properties": { + "f1": {"type": "text"}, + "f2": {"type": "text", "analyzer": "my_ngrams"} + } + } + } +} diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json b/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json new file mode 100644 index 00000000000..1a67abcfcb3 --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json @@ -0,0 +1,80 @@ +{ + "settings": { + "index": { + "number_of_shards": 1, + "number_of_replicas": 0, + "analysis": { + "analyzer": { + "my_ngrams": { + "type": "custom", + "tokenizer": "standard", + "filter": ["my_ngrams"] + } + }, + "filter": { + "my_ngrams": { + "type": "ngram", + "min_gram": 2, + "max_gram": 2 + } + } + } + } + }, + "mappings": { + "doc": { + "_all": { + "enabled": false + }, + "properties": { + "f1": {"type": "text"}, + "f2": {"type": "keyword"}, + "f3": {"type": "text", "analyzer": "my_ngrams"}, + "f4": { + "type": "text", + "index_options": "docs" + }, + "f_multi": { + "type": "text", + "fields": { + "raw": {"type": "keyword"}, + "f_token_count": {"type": "token_count", "analyzer": "standard"} + } + }, + "f_object": { + "type": "object", + "properties": { + "sub1": {"type": "text"}, + "sub2": {"type": "keyword"}, + "sub3": {"type": "integer"} + } + }, + "f_nested": { + "type": "nested", + "properties": { + "nest1": {"type": "text"}, + "nest2": {"type": "keyword"}, + "nest3": {"type": "integer"} + } + }, + "f_date": { + "type": "date", + 
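The all-query fixtures are meant to be used as a pair: the index mapping declares typed fields (byte, half_float, scaled_float, completion, geo_shape, and so on), while the example document deliberately supplies most values as JSON strings (for example "f_int": "1293" and "f_bool": "true"), relying on Elasticsearch to coerce them to the mapped types at index time. A hedged sketch of wiring the two together by hand, assuming a local test node and an arbitrary index name:

import json
import urllib.request

def es(method, path, body=None):
    # Tiny REST helper against a local test node (an assumption).
    data = json.dumps(body).encode('utf-8') if body is not None else None
    req = urllib.request.Request('http://localhost:9200' + path, data=data,
                                 headers={'Content-Type': 'application/json'},
                                 method=method)
    with urllib.request.urlopen(req) as res:
        return json.loads(res.read().decode('utf-8'))

with open('all-query-index.json', encoding='utf-8') as f:
    mapping = json.load(f)
with open('all-example-document.json', encoding='utf-8') as f:
    doc = json.load(f)

es('PUT', '/all_query_test', mapping)                 # settings + mappings
es('PUT', '/all_query_test/doc/1?refresh=true', doc)  # index the fixture
total = es('POST', '/all_query_test/_search',
           {'query': {'match': {'f1': 'foo'}}})['hits']['total']
print('matching docs:', total)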
"format": "yyyy/MM/dd||epoch_millis" + }, + "f_bool": {"type": "boolean"}, + "f_byte": {"type": "byte"}, + "f_short": {"type": "short"}, + "f_int": {"type": "integer"}, + "f_long": {"type": "long"}, + "f_float": {"type": "float"}, + "f_hfloat": {"type": "half_float"}, + "f_sfloat": {"type": "scaled_float", "scaling_factor": 100}, + "f_ip": {"type": "ip"}, + "f_binary": {"type": "binary"}, + "f_suggest": {"type": "completion"}, + "f_geop": {"type": "geo_point"}, + "f_geos": {"type": "geo_shape"} + } + } + } +} diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py index 5a262d23cb6..5751335d1e3 100644 --- a/dev-tools/create_bwc_index.py +++ b/dev-tools/create_bwc_index.py @@ -103,7 +103,7 @@ def delete_by_query(es, version, index_name, doc_type): return deleted_count = es.count(index=index_name, doc_type=doc_type, body=query)['count'] - + result = es.delete_by_query(index=index_name, doc_type=doc_type, body=query) @@ -113,9 +113,13 @@ def delete_by_query(es, version, index_name, doc_type): logging.info('Deleted %d docs' % deleted_count) -def run_basic_asserts(es, index_name, type, num_docs): +def run_basic_asserts(es, version, index_name, type, num_docs): count = es.count(index=index_name)['count'] assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count) + if parse_version(version) < parse_version('5.1.0'): + # This alias isn't allowed to be created after 5.1 so we can verify that we can still use it + count = es.count(index='#' + index_name)['count'] + assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count) for _ in range(0, num_docs): random_doc_id = random.randint(0, num_docs-1) doc = es.get(index=index_name, doc_type=type, id=random_doc_id) @@ -265,12 +269,20 @@ def generate_index(client, version, index_name): mappings['doc'] = {'properties' : {}} supports_dots_in_field_names = parse_version(version) >= parse_version("2.4.0") if supports_dots_in_field_names: - mappings["doc"]['properties'].update({ + + if parse_version(version) < parse_version("5.0.0-alpha1"): + mappings["doc"]['properties'].update({ 'field.with.dots': { 'type': 'string', 'boost': 4 } }) + else: + mappings["doc"]['properties'].update({ + 'field.with.dots': { + 'type': 'text' + } + }) if parse_version(version) < parse_version("5.0.0-alpha1"): mappings['norms'] = { @@ -339,7 +351,10 @@ def generate_index(client, version, index_name): if warmers: body['warmers'] = warmers client.indices.create(index=index_name, body=body) - health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0) + if parse_version(version) < parse_version("5.0.0-alpha1"): + health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0) + else: + health = client.cluster.health(wait_for_status='green', wait_for_no_relocating_shards=True) assert health['timed_out'] == False, 'cluster health timed out %s' % health num_docs = random.randint(2000, 3000) @@ -349,8 +364,11 @@ def generate_index(client, version, index_name): # see https://github.com/elastic/elasticsearch/issues/5817 num_docs = int(num_docs / 10) index_documents(client, index_name, 'doc', num_docs, supports_dots_in_field_names) + if parse_version(version) < parse_version('5.1.0'): + logging.info("Adding a alias that can't be created in 5.1+ so we can assert that we can still use it") + client.indices.put_alias(index=index_name, name='#' + index_name) logging.info('Running basic asserts on the data added') - run_basic_asserts(client, index_name, 'doc', num_docs) + 
run_basic_asserts(client, version, index_name, 'doc', num_docs) return num_docs, supports_dots_in_field_names def snapshot_index(client, version, repo_dir): @@ -483,7 +501,7 @@ def create_bwc_index(cfg, version): if node is not None: # This only happens if we've hit an exception: shutdown_node(node) - + shutil.rmtree(tmp_dir) def shutdown_node(node): @@ -522,4 +540,3 @@ if __name__ == '__main__': main() except KeyboardInterrupt: print('Caught keyboard interrupt, exiting...') - diff --git a/dev-tools/get-bwc-version.py b/dev-tools/get-bwc-version.py index 54c559d1dc8..4ef9736ea06 100644 --- a/dev-tools/get-bwc-version.py +++ b/dev-tools/get-bwc-version.py @@ -9,7 +9,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on +# software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. @@ -38,7 +38,7 @@ def parse_config(): def main(): c = parse_config() - + if not os.path.exists(c.path): print('Creating %s' % c.path) os.mkdir(c.path) @@ -53,7 +53,7 @@ def main(): shutil.rmtree(version_dir) else: print('Version %s exists at %s' % (c.version, version_dir)) - return + return # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts if is_windows: @@ -67,14 +67,14 @@ def main(): elif c.version.startswith('0.') or c.version.startswith('1.'): url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename else: - url = 'http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/%s/%s' % (c.version, filename) + url = 'https://artifacts.elastic.co/downloads/elasticsearch/%s' % filename print('Downloading %s' % url) urllib.request.urlretrieve(url, filename) print('Extracting to %s' % version_dir) if is_windows: archive = zipfile.ZipFile(filename) - archive.extractall() + archive.extractall() else: # for some reason python's tarfile module has trouble with ES tgz? subprocess.check_call('tar -xzf %s' % filename, shell=True) diff --git a/dev-tools/prepare_release_candidate.py b/dev-tools/prepare_release_candidate.py deleted file mode 100644 index 84c10f4b9e4..00000000000 --- a/dev-tools/prepare_release_candidate.py +++ /dev/null @@ -1,417 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -# Prepare a release -# -# 1. Update the Version.java to remove the snapshot bit -# 2. 
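Both dev-tools changes above follow the same pattern: branch on the target Elasticsearch version so one script can drive old and new releases alike, whether that is the string-plus-boost versus text mapping, the renamed wait_for_no_relocating_shards health parameter, the '#'-prefixed alias that 5.1+ refuses to create, or the download host. A minimal, self-contained rendering of that gating idea; LooseVersion here stands in for the scripts' own parse_version helper (an assumption, and the real helper also understands -alpha style suffixes):

from distutils.version import LooseVersion

def health_kwargs(version):
    # wait_for_relocating_shards=0 became wait_for_no_relocating_shards=True
    # in 5.0.0-alpha1, as gated in create_bwc_index.py above.
    if LooseVersion(version) < LooseVersion('5.0.0'):
        return {'wait_for_status': 'green', 'wait_for_relocating_shards': 0}
    return {'wait_for_status': 'green', 'wait_for_no_relocating_shards': True}

def release_url(version, filename):
    # Mirrors the branching in get-bwc-version.py: 0.x/1.x releases stay on
    # the old download host, everything newer comes from artifacts.elastic.co.
    if version.startswith(('0.', '1.')):
        return 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
    return 'https://artifacts.elastic.co/downloads/elasticsearch/%s' % filename

print(health_kwargs('2.4.0'))
print(release_url('5.0.0', 'elasticsearch-5.0.0.tar.gz'))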
Remove the -SNAPSHOT suffix in all pom.xml files -# -# USAGE: -# -# python3 ./dev-tools/prepare-release.py -# -# Note: Ensure the script is run from the elasticsearch top level directory -# - -import fnmatch -import argparse -from prepare_release_update_documentation import update_reference_docs -import subprocess -import tempfile -import re -import os -import shutil -from functools import partial -import sys - -VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java' -POM_FILE = 'pom.xml' -MAIL_TEMPLATE = """ -Hi all - -The new release candidate for %(version)s is now available, including the x-plugins and RPM/deb repos. This release is based on: - - * Elasticsearch commit: %(hash)s - https://github.com/elastic/elasticsearch/commit/%(hash)s - * X-Plugins commit: FILL_IN_X-PLUGINS_HASH - https://github.com/elastic/x-plugins/commit/FILL_IN_X-PLUGINS_HASH - -The packages may be downloaded from the following URLs: - - * ZIP - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip - * tar.gz - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz - * RPM - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm - * deb - http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb - -Plugins can be installed as follows: - - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install cloud-aws - -The same goes for the x-plugins: - - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install license - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install marvel-agent - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install shield - ES_JAVA_OPTS="-Des.plugins.staging=true" bin/elasticsearch-plugin install watcher - -To install the deb from an APT repo: - -APT line sources.list line: - -deb http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/debian/ stable main - -To install the RPM, create a YUM file like: - - /etc/yum.repos.d/elasticsearch.repo - -containing: - -[elasticsearch-2.0] -name=Elasticsearch repository for packages -baseurl=http://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/centos -gpgcheck=1 -gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch -enabled=1 - -To smoke-test the release please run: - - python3 -B ./dev-tools/smoke_test_rc.py --version %(version)s --hash %(hash)s --plugins license,shield,watcher - -NOTE: this script requires JAVA_HOME to point to a Java 7 Runtime - -""" - -# console colors -COLOR_OK = '\033[92m' -COLOR_END = '\033[0m' -COLOR_FAIL = '\033[91m' - -def run(command, env_vars=None): - if env_vars: - for key, value in env_vars.items(): - os.putenv(key, value) - print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END)) - if os.system(command): - raise RuntimeError(' FAILED: %s' % (command)) - -def ensure_checkout_is_clean(): - # Make sure no local mods: - s = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8') - if len(s) > 0: - raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % s) - - # Make sure no untracked files: - s = subprocess.check_output('git status', 
shell=True).decode('utf-8', errors='replace') - if 'Untracked files:' in s: - if 'dev-tools/__pycache__/' in s: - print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***') - raise RuntimeError('git status shows untracked files got:\n%s' % s) - - # Make sure we have all changes from origin: - if 'is behind' in s: - raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (s)) - - # Make sure we no local unpushed changes (this is supposed to be a clean area): - if 'is ahead' in s: - raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch got:\n%s' % (s)) - -# Reads the given file and applies the -# callback to it. If the callback changed -# a line the given file is replaced with -# the modified input. -def process_file(file_path, line_callback): - fh, abs_path = tempfile.mkstemp() - modified = False - with open(abs_path,'w', encoding='utf-8') as new_file: - with open(file_path, encoding='utf-8') as old_file: - for line in old_file: - new_line = line_callback(line) - modified = modified or (new_line != line) - new_file.write(new_line) - os.close(fh) - if modified: - #Remove original file - os.remove(file_path) - #Move new file - shutil.move(abs_path, file_path) - return True - else: - # nothing to do - just remove the tmp file - os.remove(abs_path) - return False - -# Moves the Version.java file from a snapshot to a release -def remove_version_snapshot(version_file, release): - # 1.0.0.Beta1 -> 1_0_0_Beta1 - release = release.replace('.', '_') - release = release.replace('-', '_') - pattern = 'new Version(V_%s_ID, true' % (release) - replacement = 'new Version(V_%s_ID, false' % (release) - def callback(line): - return line.replace(pattern, replacement) - processed = process_file(version_file, callback) - if not processed: - raise RuntimeError('failed to remove snapshot version for %s' % (release)) - -def rename_local_meta_files(path): - for root, _, file_names in os.walk(path): - for file_name in fnmatch.filter(file_names, 'maven-metadata-local.xml*'): - full_path = os.path.join(root, file_name) - os.rename(full_path, os.path.join(root, file_name.replace('-local', ''))) - -# Checks the pom.xml for the release version. -# This method fails if the pom file has no SNAPSHOT version set ie. -# if the version is already on a release version we fail. -# Returns the next version string ie. 0.90.7 -def find_release_version(): - with open('pom.xml', encoding='utf-8') as file: - for line in file: - match = re.search(r'(.+)-SNAPSHOT', line) - if match: - return match.group(1) - raise RuntimeError('Could not find release version in branch') - -# Checks if the produced RPM is signed with the supplied GPG key -def ensure_rpm_is_signed(rpm, gpg_key): - rpm_check_signature_cmd = 'rpm -v -K %s | grep -qi %s' % (rpm, gpg_key) - try: - subprocess.check_output(rpm_check_signature_cmd, shell=True) - except: - raise RuntimeError('Aborting. 
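The deleted script's process_file helper is a generally useful pattern worth keeping in mind: stream the file through a line callback into a temporary file, and only swap the result into place when a line actually changed. A compact restatement of that helper, functionally the same as the code being removed:

import os
import shutil
import tempfile

def process_file(file_path, line_callback):
    # Stream file_path through line_callback into a temp file; only swap
    # the result into place if some line actually changed.
    fh, tmp_path = tempfile.mkstemp()
    modified = False
    with open(tmp_path, 'w', encoding='utf-8') as new_file, \
         open(file_path, encoding='utf-8') as old_file:
        for line in old_file:
            new_line = line_callback(line)
            modified = modified or (new_line != line)
            new_file.write(new_line)
    os.close(fh)
    if modified:
        shutil.move(tmp_path, file_path)
        return True
    os.remove(tmp_path)
    return False

# Example use, in the spirit of remove_version_snapshot (identifiers illustrative):
# process_file('Version.java', lambda l: l.replace('V_5_0_0_ID, true', 'V_5_0_0_ID, false'))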
RPM does not seem to be signed, check with: rpm -v -K %s' % rpm) - -# Checks if a command exists, needed for external binaries -def check_command_exists(name, cmd): - try: - subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - raise RuntimeError('Could not run command %s - please make sure it is installed and in $PATH' % (name)) - -def run_and_print(text, run_function): - try: - print(text, end='') - run_function() - print(COLOR_OK + 'OK' + COLOR_END) - return True - except RuntimeError: - print(COLOR_FAIL + 'NOT OK' + COLOR_END) - return False - -def check_env_var(text, env_var): - try: - print(text, end='') - os.environ[env_var] - print(COLOR_OK + 'OK' + COLOR_END) - return True - except KeyError: - print(COLOR_FAIL + 'NOT OK' + COLOR_END) - return False - -def check_environment_and_commandline_tools(check_only): - checks = list() - checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_KEY... ', 'AWS_SECRET_KEY')) - checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY... ', 'AWS_ACCESS_KEY')) - checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version'))) - checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version'))) - checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version'))) - checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v'))) - checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version'))) - checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version'))) - checks.append(run_and_print('Checking command: deb-s3... ', partial(check_command_exists, 'deb-s3', 'deb-s3 -h'))) - checks.append(run_and_print('Checking command: rpm-s3... 
', partial(check_command_exists, 'rpm-s3', 'rpm-s3 -h'))) - - if check_only: - sys.exit(0) - - if False in checks: - print("Exiting due to failing checks") - sys.exit(0) - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release') - parser.add_argument('--deploy-sonatype', dest='deploy_sonatype', action='store_true', - help='Installs and Deploys the release on a sonatype staging repository.') - parser.add_argument('--deploy-s3', dest='deploy_s3', action='store_true', - help='Pushes artifacts to the S3 staging area') - parser.add_argument('--deploy-s3-repos', dest='deploy_s3_repos', action='store_true', - help='Creates package repositories in S3 repo') - parser.add_argument('--no-install', dest='no_install', action='store_true', - help='Does not run "mvn install", expects this to be run already and reuses artifacts from local repo, only useful with --deploy-s3/--deploy-s3-repos, after sonatype deplomeny to ensure same artifacts') - parser.add_argument('--skip-doc-check', dest='skip_doc_check', action='store_false', - help='Skips any checks for pending documentation changes') - parser.add_argument('--skip-tests', dest='skip_tests', action='store_true', - help='Skips any test runs') - parser.add_argument('--gpg-key', dest='gpg_key', default="D88E42B4", - help='Allows you to specify a different gpg_key to be used instead of the default release key') - parser.add_argument('--bucket', '-b', dest='bucket', default="download.elasticsearch.org", - help='Allows you to specify a different s3 bucket to upload the artifacts to') - parser.add_argument('--quiet', dest='quiet', action='store_true', - help='Runs the script in quiet mode') - parser.add_argument('--check', dest='check', action='store_true', - help='Checks and reports for all requirements and then exits') - - # by default, we only run mvn install and don't push anything repo - parser.set_defaults(deploy_sonatype=False) - parser.set_defaults(deploy_s3=False) - parser.set_defaults(deploy_s3_repos=False) - parser.set_defaults(no_install=False) - # other defaults - parser.set_defaults(skip_doc_check=False) - parser.set_defaults(quiet=False) - parser.set_defaults(skip_tests=False) - - args = parser.parse_args() - skip_doc_check = args.skip_doc_check - gpg_key = args.gpg_key - bucket = args.bucket - deploy_sonatype = args.deploy_sonatype - deploy_s3 = args.deploy_s3 - deploy_s3_repos = args.deploy_s3_repos - run_mvn_install = not args.no_install - skip_tests = args.skip_tests - - check_environment_and_commandline_tools(args.check) - - if not run_mvn_install and deploy_sonatype: - print('Using --no-install and --deploy-sonatype together does not work. 
Exiting') - sys.exit(-1) - - print('*** Preparing a release candidate: ', end='') - print('deploy sonatype: %s%s%s' % (COLOR_OK if deploy_sonatype else COLOR_FAIL, 'yes' if deploy_sonatype else 'no', COLOR_END), end='') - print(', deploy s3: %s%s%s' % (COLOR_OK if deploy_s3 else COLOR_FAIL, 'yes' if deploy_s3 else 'no', COLOR_END), end='') - print(', deploy s3 repos: %s%s%s' % (COLOR_OK if deploy_s3_repos else COLOR_FAIL, 'yes' if deploy_s3_repos else 'no', COLOR_END), end='') - print('') - - shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8') - releaseDirectory = os.getenv('HOME') + '/elastic-releases' - release_version = find_release_version() - localRepo = '%s/elasticsearch-%s-%s' % (releaseDirectory, release_version, shortHash) - localRepoElasticsearch = localRepo + '/org/elasticsearch' - - ensure_checkout_is_clean() - if not re.match('(\d+\.\d+)\.*',release_version): - raise RuntimeError('illegal release version format: %s' % (release_version)) - package_repo_version = '%s.x' % re.match('(\d+)\.*', release_version).group(1) - - print('*** Preparing release version: [%s]' % release_version) - - if not skip_doc_check: - print('*** Check for pending documentation changes') - pending_files = update_reference_docs(release_version) - if pending_files: - raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files)) - - run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version)) - run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version)) - run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version)) - - remove_version_snapshot(VERSION_FILE, release_version) - - print('*** Done removing snapshot version. DO NOT COMMIT THIS, WHEN CREATING A RELEASE CANDIDATE.') - - if not os.path.exists(releaseDirectory): - os.mkdir(releaseDirectory) - if os.path.exists(localRepoElasticsearch) and run_mvn_install: - print('clean local repository %s' % localRepoElasticsearch) - shutil.rmtree(localRepoElasticsearch) - - mvn_target = 'deploy' if deploy_sonatype else 'install' - tests = '-DskipTests' if skip_tests else '-Dskip.integ.tests=true' - install_command = 'mvn clean %s -Prelease %s -Dgpg.key="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, tests, gpg_key, localRepo) - clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch) - - if not run_mvn_install: - print('') - print('*** By choosing --no-install we assume you ran the following commands successfully:') - print(' %s' % (install_command)) - print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command)) - rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch) - print(' 2. 
Rename all maven metadata files: %s' % (rename_metadata_files_command)) - else: - for cmd in [install_command, clean_repo_command]: - run(cmd) - rename_local_meta_files(localRepoElasticsearch) - - rpm = '%s/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (localRepoElasticsearch, release_version, release_version) - print('Ensuring that RPM has been signed') - ensure_rpm_is_signed(rpm, gpg_key) - - # repository push commands - s3cmd_sync_to_staging_bucket_cmd = 's3cmd sync -P %s s3://%s/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, bucket, release_version, shortHash) - s3_bucket_sync_to = '%s/elasticsearch/staging/%s-%s/repos/' % (bucket, release_version, shortHash) - s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (package_repo_version, s3_bucket_sync_to) - - debs3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/debian' % (release_version, shortHash, package_repo_version) - debs3_upload_cmd = 'deb-s3 upload --preserve-versions %s/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb -b %s --prefix %s --sign %s --arch amd64' % (localRepoElasticsearch, release_version, release_version, bucket, debs3_prefix, gpg_key) - debs3_list_cmd = 'deb-s3 list -b %s --prefix %s' % (bucket, debs3_prefix) - debs3_verify_cmd = 'deb-s3 verify -b %s --prefix %s' % (bucket, debs3_prefix) - rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, package_repo_version) - # external-1 is the alias name for the us-east-1 region. This is used by rpm-s3 to construct the hostname - rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 100 %s -r external-1' % (bucket, rpms3_prefix, rpm) - - if deploy_s3: - run(s3cmd_sync_to_staging_bucket_cmd) - else: - print('') - print('*** To push a release candidate to s3 run: ') - print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch)) - print (' %s' % (s3cmd_sync_to_staging_bucket_cmd)) - - if deploy_s3_repos: - print('*** Syncing official package repository into staging s3 bucket') - run(s3cmd_sync_official_repo_cmd) - print('*** Uploading debian package (you will be prompted for the passphrase!)') - run(debs3_upload_cmd) - run(debs3_list_cmd) - run(debs3_verify_cmd) - print('*** Uploading rpm package (you will be prompted for the passphrase!)') - run(rpms3_upload_cmd) - else: - print('*** To create repositories on S3 run:') - print(' 1. Sync existing repo into staging: %s' % s3cmd_sync_official_repo_cmd) - print(' 2. Upload debian package (and sign it): %s' % debs3_upload_cmd) - print(' 3. List all debian packages: %s' % debs3_list_cmd) - print(' 4. Verify debian packages: %s' % debs3_verify_cmd) - print(' 5. 
Upload RPM: %s' % rpms3_upload_cmd) - print('') - print('NOTE: the above mvn command will promt you several times for the GPG passphrase of the key you specified you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase') - print(' since RPM signing doesn\'t support gpg-agents the recommended way to set the password is to add a release profile to your settings.xml:') - print(""" - - - release - - YourPasswordGoesHere - - - - """) - print('NOTE: Running s3cmd might require you to create a config file with your credentials, if the s3cmd does not support suppliying them via the command line!') - - print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:') - string_format_dict = {'version' : release_version, 'hash': shortHash, 'package_repo_version' : package_repo_version, 'bucket': bucket} - print(MAIL_TEMPLATE % string_format_dict) - - print('') - print('You can verify that pushing to the staging repository pushed all the artifacts by running (log into sonatype to find out the correct id):') - print(' python -B dev-tools/validate-maven-repository.py %s https://oss.sonatype.org/service/local/repositories/orgelasticsearch-IDTOFINDOUT/content/org/elasticsearch ' %(localRepoElasticsearch)) - - print('') - print('To publish the release and the repo on S3 execute the following commands:') - print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/repos/%(package_repo_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(package_repo_version)s' % string_format_dict) - print(' s3cmd cp --recursive s3://%(bucket)s/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://%(bucket)s/elasticsearch/release/org' % string_format_dict) - print('Now go ahead and tag the release:') - print(' git tag -a v%(version)s %(hash)s' % string_format_dict) - print(' git push origin v%(version)s' % string_format_dict ) - - diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index 420ef35f36f..9a2e7f9b786 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -55,7 +55,6 @@ import json import base64 from urllib.parse import urlparse -from prepare_release_candidate import run from http.client import HTTPConnection DEFAULT_PLUGINS = ["analysis-icu", @@ -88,6 +87,18 @@ except KeyError: Please set JAVA_HOME in the env before running release tool On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""") +# console colors +COLOR_OK = '\033[92m' +COLOR_END = '\033[0m' + +def run(command, env_vars=None): + if env_vars: + for key, value in env_vars.items(): + os.putenv(key, value) + print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END)) + if os.system(command): + raise RuntimeError(' FAILED: %s' % (command)) + def java_exe(): path = JAVA_HOME return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) @@ -97,7 +108,6 @@ def verify_java_version(version): if ' version "%s.' 
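Two things happen in the smoke_test_rc.py hunks below: the script stops importing run from the now-deleted prepare_release_candidate.py by inlining its own copy, and the plugin listing moves from the removed /_nodes?plugin=true query parameter to the /_nodes/plugins endpoint. A standalone sketch of that plugin check; the node address and the expected plugin set are assumptions:

import json
from http.client import HTTPConnection

expected = {'analysis-icu'}  # illustrative subset of DEFAULT_PLUGINS

conn = HTTPConnection('localhost', 9200, timeout=10)
conn.request('GET', '/_nodes/plugins?pretty=true')
res = conn.getresponse()
assert res.status == 200, 'unexpected response: %s' % res.status
nodes = json.loads(res.read().decode('utf-8'))['nodes']
for node_id, node in nodes.items():
    installed = {p['name'] for p in node['plugins']}
    missing = expected - installed
    assert not missing, 'node %s is missing plugins: %s' % (node_id, missing)
print('all expected plugins present')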
% version not in s: raise RuntimeError('got wrong version for java %s:\n%s' % (version, s)) - def sha1(file): with open(file, 'rb') as f: return hashlib.sha1(f.read()).hexdigest() @@ -106,7 +116,6 @@ def read_fully(file): with open(file, encoding='utf-8') as f: return f.read() - def wait_for_node_startup(es_dir, timeout=60, header={}): print(' Waiting until node becomes available for at most %s seconds' % timeout) for _ in range(timeout): @@ -225,7 +234,7 @@ def smoke_test_release(release, files, hash, plugins): if version['build_snapshot']: raise RuntimeError('Expected non snapshot version') print(' Verify if plugins are listed in _nodes') - conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers) + conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers) res = conn.getresponse() if res.status == 200: nodes = json.loads(res.read().decode("utf-8"))['nodes'] diff --git a/dev-tools/validate-maven-repository.py b/dev-tools/validate-maven-repository.py deleted file mode 100644 index 6bf84a3a185..00000000000 --- a/dev-tools/validate-maven-repository.py +++ /dev/null @@ -1,130 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -# Helper python script to check if a sonatype staging repo contains -# all the required files compared to a local repository -# -# The script does the following steps -# -# 1. Scans the local maven repo for all files in /org/elasticsearch -# 2. Opens a HTTP connection to the staging repo -# 3. Executes a HEAD request for each file found in step one -# 4. Compares the content-length response header with the real file size -# 5. Return an error if those two numbers differ -# -# A pre requirement to run this, is to find out via the oss.sonatype.org web UI, how that repo is named -# - After logging in you go to 'Staging repositories' and search for the one you just created -# - Click into the `Content` tab -# - Open any artifact (not a directory) -# - Copy the link of `Repository Path` on the right and reuse that part of the URL -# -# Alternatively you can just use the name of the repository and reuse the rest (ie. 
the repository -# named for the example below would have been named orgelasticsearch-1012) -# -# -# Example call -# python dev-tools/validate-maven-repository.py /path/to/repo/org/elasticsearch/ \ -# https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch - -import sys -import os -import httplib -import urlparse -import re - -# Draw a simple progress bar, a couple of hundred HEAD requests might take a while -# Note, when drawing this, it uses the carriage return character, so you should not -# write anything in between -def drawProgressBar(percent, barLen = 40): - sys.stdout.write("\r") - progress = "" - for i in range(barLen): - if i < int(barLen * percent): - progress += "=" - else: - progress += " " - sys.stdout.write("[ %s ] %.2f%%" % (progress, percent * 100)) - sys.stdout.flush() - -if __name__ == "__main__": - if len(sys.argv) != 3: - print 'Usage: %s [user:pass]' % (sys.argv[0]) - print '' - print 'Example: %s /tmp/my-maven-repo/org/elasticsearch https://oss.sonatype.org/service/local/repositories/orgelasticsearch-1012/content/org/elasticsearch' % (sys.argv[0]) - else: - sys.argv[1] = re.sub('/$', '', sys.argv[1]) - sys.argv[2] = re.sub('/$', '', sys.argv[2]) - - localMavenRepo = sys.argv[1] - endpoint = sys.argv[2] - - filesToCheck = [] - foundSignedFiles = False - - for root, dirs, files in os.walk(localMavenRepo): - for file in files: - # no metadata files (they get renamed from maven-metadata-local.xml to maven-metadata.xml while deploying) - # no .properties and .repositories files (they don't get uploaded) - if not file.startswith('maven-metadata') and not file.endswith('.properties') and not file.endswith('.repositories'): - filesToCheck.append(os.path.join(root, file)) - if file.endswith('.asc'): - foundSignedFiles = True - - print "Need to check %i files" % len(filesToCheck) - if not foundSignedFiles: - print '### Warning: No signed .asc files found' - - # set up http - parsed_uri = urlparse.urlparse(endpoint) - domain = parsed_uri.netloc - if parsed_uri.scheme == 'https': - conn = httplib.HTTPSConnection(domain) - else: - conn = httplib.HTTPConnection(domain) - #conn.set_debuglevel(5) - - drawProgressBar(0) - errors = [] - for idx, file in enumerate(filesToCheck): - request_uri = parsed_uri.path + file[len(localMavenRepo):] - conn.request("HEAD", request_uri) - res = conn.getresponse() - res.read() # useless call for head, but prevents httplib.ResponseNotReady raise - - absolute_url = parsed_uri.scheme + '://' + parsed_uri.netloc + request_uri - if res.status == 200: - content_length = res.getheader('content-length') - local_file_size = os.path.getsize(file) - if int(content_length) != int(local_file_size): - errors.append('LENGTH MISMATCH: %s differs in size. 
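validate-maven-repository.py is deleted wholesale here (it was still Python 2, note the httplib/urlparse imports and bare print statements), but the check it documents is easy to restate: issue a HEAD request per staged artifact and compare the Content-Length header with the local file size. A Python 3 sketch of that core comparison, with placeholder URL and path:

import os
import urllib.request

def head_matches_local(url, local_path):
    # HEAD the remote artifact and compare its Content-Length (assuming the
    # server reports one) with the on-disk size; a mismatch usually means a
    # truncated or failed upload.
    req = urllib.request.Request(url, method='HEAD')
    with urllib.request.urlopen(req) as res:
        remote_size = int(res.headers['Content-Length'])
    return remote_size == os.path.getsize(local_path)

# Placeholder URL and file, not a real staging repository:
# head_matches_local('https://example.org/repo/foo-1.0.jar', 'foo-1.0.jar')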
local %s <=> %s remote' % (absolute_url, content_length, local_file_size)) - elif res.status == 404: - errors.append('MISSING: %s' % absolute_url) - elif res.status == 301 or res.status == 302: - errors.append('REDIRECT: %s to %s' % (absolute_url, res.getheader('location'))) - else: - errors.append('ERROR: %s http response: %s %s' %(absolute_url, res.status, res.reason)) - - # update progressbar at the end - drawProgressBar((idx+1)/float(len(filesToCheck))) - - print - - if len(errors) != 0: - print 'The following errors occurred (%s out of %s files)' % (len(errors), len(filesToCheck)) - print - for error in errors: - print error - sys.exit(-1) diff --git a/distribution/build.gradle b/distribution/build.gradle index fe172620b5b..2cfd7ebbbce 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -39,10 +39,6 @@ buildscript { } } -// this is common configuration for distributions, but we also add it here for the license check to use -ext.dependencyFiles = project(':core').configurations.runtime.copyRecursive() - - /***************************************************************************** * Modules * *****************************************************************************/ @@ -146,7 +142,7 @@ subprojects { libFiles = copySpec { into 'lib' from project(':core').jar - from project(':distribution').dependencyFiles + from project(':core').configurations.runtime } modulesFiles = copySpec { @@ -196,10 +192,19 @@ subprojects { * Zip and tgz configuration * *****************************************************************************/ configure(subprojects.findAll { ['zip', 'tar', 'integ-test-zip'].contains(it.name) }) { + // CopySpec does not make it easy to create an empty director so we create the directory that we want, and then point CopySpec to its + // parent to copy to the root of the distribution + File plugins = new File(buildDir, 'plugins-hack/plugins') + task createPluginsDir(type: EmptyDirTask) { + dir "${plugins}" + dirMode 0755 + } project.ext.archivesFiles = copySpec { into("elasticsearch-${version}") { with libFiles into('config') { + dirMode 0750 + fileMode 0660 with configFiles } into('bin') { @@ -212,6 +217,11 @@ configure(subprojects.findAll { ['zip', 'tar', 'integ-test-zip'].contains(it.nam MavenFilteringHack.filter(it, expansions) } } + into('') { + from { + plugins.getParent() + } + } with commonFiles from('../src/main/resources') { include 'bin/*.exe' @@ -242,6 +252,12 @@ configure(subprojects.findAll { ['zip', 'tar', 'integ-test-zip'].contains(it.nam * 3. ospackage really wants to suck up some of the debian control scripts * directly from the filesystem. It doesn't want to process them through * MavenFilteringHack or any other copy-style action. + * + * The following commands are useful when it comes to check the user/group + * and files permissions set within the RPM and DEB packages: + * + * rpm -qlp --dump path/to/elasticsearch.rpm + * dpkg -c path/to/elasticsearch.deb */ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { integTest.enabled = Os.isFamily(Os.FAMILY_WINDOWS) == false @@ -276,8 +292,6 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { dependsOn createEtc, createEtcScripts with configFiles into "${packagingFiles}/etc/elasticsearch" - fileMode 0640 - dirMode 0750 /* Explicitly declare the output files so this task doesn't consider itself up to date when the directory is created, which it would by default. And that'll happen when createEtc runs. 
*/ @@ -365,7 +379,8 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { configurationFile '/etc/elasticsearch/jvm.options' configurationFile '/etc/elasticsearch/log4j2.properties' into('/etc/elasticsearch') { - fileMode 0750 + dirMode 0750 + fileMode 0660 permissionGroup 'elasticsearch' includeEmptyDirs true createDirectoryEntry true @@ -387,51 +402,38 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { } configurationFile '/etc/init.d/elasticsearch' into('/etc/init.d') { - fileMode 0755 + fileMode 0750 fileType CONFIG | NOREPLACE from "${packagingFiles}/init.d/elasticsearch" } configurationFile project.expansions['path.env'] into(new File(project.expansions['path.env']).getParent()) { - fileMode 0644 - dirMode 0755 fileType CONFIG | NOREPLACE + fileMode 0660 from "${project.packagingFiles}/env/elasticsearch" } /** * Suck up all the empty directories that we need to install into the path. */ - Closure suckUpEmptyDirectories = { path, u, g -> + Closure suckUpEmptyDirectories = { path, u, g, mode -> into(path) { - fileMode 0755 from "${packagingFiles}/${path}" includeEmptyDirs true createDirectoryEntry true user u permissionGroup g + dirMode mode + fileMode mode } } - suckUpEmptyDirectories('/var/run', 'elasticsearch', 'elasticsearch') - suckUpEmptyDirectories('/var/log', 'elasticsearch', 'elasticsearch') - suckUpEmptyDirectories('/var/lib', 'elasticsearch', 'elasticsearch') - suckUpEmptyDirectories('/usr/share/elasticsearch', 'root', 'root') + suckUpEmptyDirectories('/var/run', 'elasticsearch', 'elasticsearch', 0755) + suckUpEmptyDirectories('/var/log', 'elasticsearch', 'elasticsearch', 0750) + suckUpEmptyDirectories('/var/lib', 'elasticsearch', 'elasticsearch', 0750) + suckUpEmptyDirectories('/usr/share/elasticsearch', 'root', 'root', 0755) } } -// TODO: dependency checks should really be when building the jar itself, which would remove the need -// for this hackery and instead we can do this inside the BuildPlugin -task dependencyLicenses(type: DependencyLicensesTask) { - dependsOn = [dependencyFiles] - dependencies = dependencyFiles - mapping from: /lucene-.*/, to: 'lucene' - mapping from: /jackson-.*/, to: 'jackson' -} -task check(group: 'Verification', description: 'Runs all checks.', dependsOn: dependencyLicenses) {} // dummy task! -task updateShas(type: UpdateShasTask) { - parentTask = dependencyLicenses -} - task run(type: RunTask) { distribution = 'zip' } diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/deb/src/main/packaging/init.d/elasticsearch index f04008ba796..cf8b5351aec 100755 --- a/distribution/deb/src/main/packaging/init.d/elasticsearch +++ b/distribution/deb/src/main/packaging/init.d/elasticsearch @@ -84,6 +84,7 @@ DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$D export ES_JAVA_OPTS export JAVA_HOME export ES_INCLUDE +export ES_JVM_OPTIONS if [ ! -x "$DAEMON" ]; then echo "The elasticsearch startup script does not exists or it is not executable, tried: $DAEMON" @@ -137,7 +138,7 @@ case "$1" in fi # Start Daemon - start-stop-daemon -d $ES_HOME --start -b --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS + start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS return=$? 
if [ $return -eq 0 ]; then i=0 diff --git a/distribution/deb/src/test/java/org/elasticsearch/test/rest/DebClientYamlTestSuiteIT.java b/distribution/deb/src/test/java/org/elasticsearch/test/rest/DebClientYamlTestSuiteIT.java index a94f0faf2d2..a63b304a1d4 100644 --- a/distribution/deb/src/test/java/org/elasticsearch/test/rest/DebClientYamlTestSuiteIT.java +++ b/distribution/deb/src/test/java/org/elasticsearch/test/rest/DebClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class DebClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/distribution/integ-test-zip/build.gradle b/distribution/integ-test-zip/build.gradle index ae4a499efd8..80da4131995 100644 --- a/distribution/integ-test-zip/build.gradle +++ b/distribution/integ-test-zip/build.gradle @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.plugin.PluginBuildPlugin task buildZip(type: Zip) { + dependsOn createPluginsDir baseName = 'elasticsearch' with archivesFiles } diff --git a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java b/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java index ca54a3becd9..c81ff7439f9 100644 --- a/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java +++ b/distribution/integ-test-zip/src/test/java/org/elasticsearch/test/rest/IntegTestZipClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class IntegTestZipClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/distribution/licenses/joda-convert-1.2.jar.sha1 b/distribution/licenses/joda-convert-1.2.jar.sha1 deleted file mode 100644 index 37c3e870580..00000000000 --- a/distribution/licenses/joda-convert-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -35ec554f0cd00c956cc69051514d9488b1374dec diff --git a/distribution/licenses/joda-convert-LICENSE.txt b/distribution/licenses/joda-convert-LICENSE.txt deleted file mode 100644 index 75b52484ea4..00000000000 --- a/distribution/licenses/joda-convert-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/distribution/licenses/joda-convert-NOTICE.txt b/distribution/licenses/joda-convert-NOTICE.txt deleted file mode 100644 index dffbcf31cac..00000000000 --- a/distribution/licenses/joda-convert-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -============================================================================= -= NOTICE file corresponding to section 4d of the Apache License Version 2.0 = -============================================================================= -This product includes software developed by -Joda.org (http://www.joda.org/). diff --git a/distribution/licenses/joda-time-2.9.4.jar.sha1 b/distribution/licenses/joda-time-2.9.4.jar.sha1 deleted file mode 100644 index e9ea891bfee..00000000000 --- a/distribution/licenses/joda-time-2.9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c295b462f16702ebe720bbb08f62e1ba80da41b \ No newline at end of file diff --git a/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 b/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 deleted file mode 100644 index 5bf4bcab46f..00000000000 --- a/distribution/licenses/log4j-1.2-api-2.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b4c5a8b734b6a29b2f03380535a48da6284b210 \ No newline at end of file diff --git a/distribution/licenses/log4j-api-2.6.2.jar.sha1 b/distribution/licenses/log4j-api-2.6.2.jar.sha1 deleted file mode 100644 index e4f9af7497c..00000000000 --- a/distribution/licenses/log4j-api-2.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd1b74a5d170686362091c7cf596bbc3adf5c09b \ No newline at end of file diff --git a/distribution/licenses/log4j-core-2.6.2.jar.sha1 b/distribution/licenses/log4j-core-2.6.2.jar.sha1 deleted file mode 100644 index 0ac4323411c..00000000000 --- a/distribution/licenses/log4j-core-2.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -00a91369f655eb1639c6aece5c5eb5108db18306 \ No newline at end of file diff --git a/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 deleted file mode 100644 index 57aec3f4ac2..00000000000 --- a/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d254d52dd394b5079129f3d5f3bf4f2d44a5936e \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 deleted file mode 100644 index 04aefc62f61..00000000000 --- a/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b625bb21456b3c0d1e5e431bced125cb060c1abd \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.2.0.jar.sha1 b/distribution/licenses/lucene-core-6.2.0.jar.sha1 deleted file mode 100644 index 2d74124e624..00000000000 --- a/distribution/licenses/lucene-core-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -849ee62525a294416802be2cacc66c80352f6f13 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 b/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 deleted file mode 100644 index 6ba525a038f..00000000000 --- a/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9527fedfd5acc624b2bb3f862bd99fb8f470b053 \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 b/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 deleted file mode 100644 index c258e3fb850..00000000000 --- a/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ca342372a3f45e32bbd21cecaa757e38eccb8a5 \ No newline at end of file 
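These checksum files pair with the dependency-license machinery that the `distribution/build.gradle` hunk above removes (`dependencyLicenses` and `updateShas`); each file holds nothing but the artifact's SHA-1 digest. As a rough sketch of how such a file could be regenerated by hand, assuming a POSIX shell with `shasum` available and using one of the jar names above purely as an illustration:

[source,sh]
----
# Write the bare SHA-1 of a vendored jar to its checksum file; the build's
# UpdateShasTask normally does this, so this is only an illustration.
shasum lucene-core-6.2.0.jar | awk '{ print $1 }' > lucene-core-6.2.0.jar.sha1
----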
diff --git a/distribution/licenses/lucene-join-6.2.0.jar.sha1 b/distribution/licenses/lucene-join-6.2.0.jar.sha1 deleted file mode 100644 index 01989e96a58..00000000000 --- a/distribution/licenses/lucene-join-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da0b8de98511abd4fe9c7d48a353d17854c5ed46 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.2.0.jar.sha1 b/distribution/licenses/lucene-memory-6.2.0.jar.sha1 deleted file mode 100644 index b8a4a87efe2..00000000000 --- a/distribution/licenses/lucene-memory-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc9e075b1ee051c8e5246c237c38d8e71dab8a66 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.2.0.jar.sha1 b/distribution/licenses/lucene-misc-6.2.0.jar.sha1 deleted file mode 100644 index f4e081865ad..00000000000 --- a/distribution/licenses/lucene-misc-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94ddde6312566a4da4a50a88e453b6c82c759b41 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.2.0.jar.sha1 b/distribution/licenses/lucene-queries-6.2.0.jar.sha1 deleted file mode 100644 index f7270a23afe..00000000000 --- a/distribution/licenses/lucene-queries-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dce47238f78e3e97d91dc6fefa9f46f07866bc2b \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 b/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 deleted file mode 100644 index 8e95aa600ec..00000000000 --- a/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17ef728ac15e668bfa1105321611548424637645 \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 b/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 deleted file mode 100644 index 1f34be3033d..00000000000 --- a/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -520183f7b9aba77a26e224760c420a3844b0631a \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 deleted file mode 100644 index 22e81792e40..00000000000 --- a/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dba929b66927b936fbc76103b109ad6c824daee \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 deleted file mode 100644 index d5e8f379d78..00000000000 --- a/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b5a6ef5cd90c0218a72e9e2f7e60104be2447da \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 deleted file mode 100644 index d0ce5275a26..00000000000 --- a/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fcdb0567725722c5145149d1502848b6a96ec18d \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 b/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 deleted file mode 100644 index 39392ad1158..00000000000 --- a/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d9d526c51f483d27f425c75d7e56bc0849242d6 \ No newline at end of file diff --git a/distribution/rpm/build.gradle b/distribution/rpm/build.gradle index a0dc33b9ad4..6f8299522ca 100644 --- a/distribution/rpm/build.gradle +++ b/distribution/rpm/build.gradle @@ -18,7 +18,7 @@ */ task buildRpm(type: 
Rpm) { - dependsOn dependencyFiles, preparePackagingFiles + dependsOn preparePackagingFiles baseName 'elasticsearch' // this is what pom generation uses for artifactId // Follow elasticsearch's rpm file naming convention archiveName "${packageName}-${project.version}.rpm" diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/rpm/src/main/packaging/init.d/elasticsearch index 8f1d93dcbdc..f991dc2f928 100644 --- a/distribution/rpm/src/main/packaging/init.d/elasticsearch +++ b/distribution/rpm/src/main/packaging/init.d/elasticsearch @@ -60,9 +60,10 @@ prog="elasticsearch" pidfile="$PID_DIR/${prog}.pid" export ES_JAVA_OPTS -export ES_STARTUP_SLEEP_TIME export JAVA_HOME export ES_INCLUDE +export ES_JVM_OPTIONS +export ES_STARTUP_SLEEP_TIME lockfile=/var/lock/subsys/$prog diff --git a/distribution/rpm/src/test/java/org/elasticsearch/test/rest/RpmClientYamlTestSuiteIT.java b/distribution/rpm/src/test/java/org/elasticsearch/test/rest/RpmClientYamlTestSuiteIT.java index a5b7f46269f..9569dfe4d10 100644 --- a/distribution/rpm/src/test/java/org/elasticsearch/test/rest/RpmClientYamlTestSuiteIT.java +++ b/distribution/rpm/src/test/java/org/elasticsearch/test/rest/RpmClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class RpmClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/distribution/src/main/packaging/scripts/postinst b/distribution/src/main/packaging/scripts/postinst index 451ec3457d3..6d19e5f33c7 100644 --- a/distribution/src/main/packaging/scripts/postinst +++ b/distribution/src/main/packaging/scripts/postinst @@ -103,7 +103,7 @@ chmod 0750 /etc/elasticsearch chmod 0750 /etc/elasticsearch/scripts if [ -f /etc/sysconfig/elasticsearch ]; then - chmod 0644 /etc/sysconfig/elasticsearch + chmod 0660 /etc/sysconfig/elasticsearch fi ${scripts.footer} diff --git a/distribution/src/main/resources/bin/elasticsearch b/distribution/src/main/resources/bin/elasticsearch index 7b3d0ea1981..efac6fc4b63 100755 --- a/distribution/src/main/resources/bin/elasticsearch +++ b/distribution/src/main/resources/bin/elasticsearch @@ -47,7 +47,7 @@ if echo '${project.name}' | grep project.name > /dev/null ; then cat >&2 << EOF -Error: You must build the project with Maven or download a pre-built package +Error: You must build the project with Gradle or download a pre-built package before you can run Elasticsearch. See 'Building from Source' in README.textile or visit https://www.elastic.co/download to get a pre-built package. 
EOF diff --git a/distribution/src/main/resources/bin/elasticsearch-service.bat b/distribution/src/main/resources/bin/elasticsearch-service.bat index 609b8bda846..f2aa5c3e3de 100644 --- a/distribution/src/main/resources/bin/elasticsearch-service.bat +++ b/distribution/src/main/resources/bin/elasticsearch-service.bat @@ -209,15 +209,15 @@ for %%a in ("%ES_JAVA_OPTS:;=","%") do ( @endlocal & set JVM_MS=%JVM_MS% & set JVM_MX=%JVM_MX% & set JVM_SS=%JVM_SS% if "%JVM_MS%" == "" ( - echo minimum heap size not set; configure via %ES_JVM_OPTIONS% or ES_JAVA_OPTS + echo minimum heap size not set; configure using -Xms via %ES_JVM_OPTIONS% or ES_JAVA_OPTS goto:eof ) if "%JVM_MX%" == "" ( - echo maximum heap size not set; configure via %ES_JVM_OPTIONS% or ES_JAVA_OPTS + echo maximum heap size not set; configure using -Xmx via %ES_JVM_OPTIONS% or ES_JAVA_OPTS goto:eof ) if "%JVM_SS%" == "" ( - echo thread stack size not set; configure via %ES_JVM_OPTIONS% or ES_JAVA_OPTS + echo thread stack size not set; configure using -Xss via %ES_JVM_OPTIONS% or ES_JAVA_OPTS goto:eof ) diff --git a/distribution/src/main/resources/config/elasticsearch.yml b/distribution/src/main/resources/config/elasticsearch.yml index 0abff999bcb..3d732e74c0e 100644 --- a/distribution/src/main/resources/config/elasticsearch.yml +++ b/distribution/src/main/resources/config/elasticsearch.yml @@ -86,10 +86,6 @@ # # ---------------------------------- Various ----------------------------------- # -# Disable starting multiple nodes on a single system: -# -#node.max_local_storage_nodes: 1 -# # Require explicit names when deleting indices: # #action.destructive_requires_name: true diff --git a/distribution/tar/build.gradle b/distribution/tar/build.gradle index 8e34a6ce9ce..2140061ee48 100644 --- a/distribution/tar/build.gradle +++ b/distribution/tar/build.gradle @@ -18,6 +18,7 @@ */ task buildTar(type: Tar) { + dependsOn createPluginsDir baseName = 'elasticsearch' extension = 'tar.gz' with archivesFiles diff --git a/distribution/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java b/distribution/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java index 73d323f7d50..0c811c383d0 100644 --- a/distribution/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java +++ b/distribution/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class TarClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/distribution/zip/build.gradle b/distribution/zip/build.gradle index ae4a499efd8..80da4131995 100644 --- a/distribution/zip/build.gradle +++ b/distribution/zip/build.gradle @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.plugin.PluginBuildPlugin task buildZip(type: Zip) { + dependsOn createPluginsDir baseName = 'elasticsearch' with archivesFiles } diff --git a/distribution/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java b/distribution/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java index 329c8259d0b..52581c8e765 100644 --- a/distribution/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java +++ b/distribution/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class ZipClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { 
@ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/docs/README.asciidoc b/docs/README.asciidoc index 5da211c6622..367fdaa9d04 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -5,7 +5,10 @@ See: https://github.com/elastic/docs Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN SENSE" in the documentation and are automatically tested by the command -`gradle :docs:check`. By default `// CONSOLE` snippet runs as its own isolated +`gradle :docs:check`. To test just the docs from a single page, use e.g. +`gradle :docs:check -Dtests.method=*rollover*`. + +By default a `// CONSOLE` snippet runs as its own isolated test. You can manipulate the test execution in the following ways: * `// TEST`: Explicitly marks a snippet as a test. Snippets marked this way @@ -37,6 +40,9 @@ are tests even if they don't have `// CONSOLE`. `// TEST[continued]` you can make tests that contain multiple command snippets and multiple response snippets. * `// TESTRESPONSE[s/foo/bar/]`: Substitutions. See `// TEST[s/foo/bar]`. + * `// TESTRESPONSE[_cat]`: Add substitutions for testing `_cat` responses. Use + this after all other substitutions so it doesn't make other substitutions + difficult. * `// TESTSETUP`: Marks this snippet as the "setup" for all other snippets in this file. This is a somewhat natural way of structuring documentation. You say "this is the data we use to explain this feature" then you add the diff --git a/docs/build.gradle b/docs/build.gradle index 3930b49cce5..3c305fd3910 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -24,8 +24,6 @@ apply plugin: 'elasticsearch.docs-test' * only remove entries from this list. When it is empty we'll remove it * entirely and have a party! There will be cake and everything....
*/ buildRestTests.expectedUnconvertedCandidates = [ - 'reference/aggregations.asciidoc', - 'reference/aggregations/bucket/children-aggregation.asciidoc', 'reference/aggregations/bucket/datehistogram-aggregation.asciidoc', 'reference/aggregations/bucket/daterange-aggregation.asciidoc', 'reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc', @@ -95,21 +93,7 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc', 'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc', 'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc', - 'reference/cat.asciidoc', - 'reference/cat/alias.asciidoc', - 'reference/cat/allocation.asciidoc', - 'reference/cat/count.asciidoc', - 'reference/cat/fielddata.asciidoc', - 'reference/cat/health.asciidoc', - 'reference/cat/indices.asciidoc', - 'reference/cat/master.asciidoc', - 'reference/cat/nodeattrs.asciidoc', - 'reference/cat/nodes.asciidoc', - 'reference/cat/pending_tasks.asciidoc', - 'reference/cat/plugins.asciidoc', 'reference/cat/recovery.asciidoc', - 'reference/cat/repositories.asciidoc', - 'reference/cat/segments.asciidoc', 'reference/cat/shards.asciidoc', 'reference/cat/snapshots.asciidoc', 'reference/cat/templates.asciidoc', @@ -133,7 +117,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/docs/termvectors.asciidoc', 'reference/docs/update-by-query.asciidoc', 'reference/docs/update.asciidoc', - 'reference/getting-started.asciidoc', 'reference/index-modules/similarity.asciidoc', 'reference/index-modules/store.asciidoc', 'reference/index-modules/translog.asciidoc', @@ -147,7 +130,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/indices/shadow-replicas.asciidoc', 'reference/indices/shard-stores.asciidoc', 'reference/indices/update-settings.asciidoc', - 'reference/indices/upgrade.asciidoc', 'reference/ingest/ingest-node.asciidoc', 'reference/mapping/dynamic/templates.asciidoc', 'reference/mapping/fields/all-field.asciidoc', @@ -173,9 +155,7 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/search/request/highlighting.asciidoc', 'reference/search/request/inner-hits.asciidoc', 'reference/search/request/rescore.asciidoc', - 'reference/search/request/scroll.asciidoc', 'reference/search/search-template.asciidoc', - 'reference/search/suggesters/completion-suggest.asciidoc', ] integTest { @@ -194,7 +174,7 @@ integTest { configFile 'userdict_ja.txt' configFile 'KeywordTokenizer.rbbi' // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', 'myself' + setting 'reindex.remote.whitelist', '127.0.0.1:*' } } @@ -303,3 +283,26 @@ buildRestTests.setups['sales'] = ''' {"date": "2015/03/01 00:00:00", "price": 200, "type": "hat"} {"index":{}} {"date": "2015/03/01 00:00:00", "price": 175, "type": "t-shirt"}''' + +// Dummy bank account data used by getting-started.asciidoc +buildRestTests.setups['bank'] = ''' + - do: + bulk: + index: bank + type: account + refresh: true + body: | +#bank_data# +''' +/* Load the actual accounts only if we're going to use them. This complicates + * dependency checking but that is a small price to pay for not building a + * 400kb string every time we start the build. 
*/ +File accountsFile = new File("$projectDir/src/test/resources/accounts.json") +buildRestTests.inputs.file(accountsFile) +buildRestTests.doFirst { + String accounts = accountsFile.getText('UTF-8') + // Indent like a yaml test needs + accounts = accounts.replaceAll('(?m)^', ' ') + buildRestTests.setups['bank'] = + buildRestTests.setups['bank'].replace('#bank_data#', accounts) +} diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index 707d976536b..a5af5ff5c20 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -147,6 +147,8 @@ Also see the {client}/php-api/current/index.html[official Elasticsearch PHP clie * http://github.com/nervetattoo/elasticsearch[elasticsearch] PHP client. +* https://github.com/madewithlove/elasticsearcher[elasticsearcher] Agnostic lightweight package on top of the Elasticsearch PHP client. Its main goal is to allow for easier structuring of queries and indices in your application. It does not aim to hide or replace the functionality of the Elasticsearch PHP client. + [[python]] == Python diff --git a/docs/groovy-api/index.asciidoc b/docs/groovy-api/index.asciidoc index 3ed4ff9e2e6..a140c161769 100644 --- a/docs/groovy-api/index.asciidoc +++ b/docs/groovy-api/index.asciidoc @@ -1,7 +1,7 @@ = Groovy API :ref: http://www.elastic.co/guide/en/elasticsearch/reference/current :java: http://www.elastic.co/guide/en/elasticsearch/client/java-api/current -:version: 5.0.0-alpha5 +:version: 6.0.0-alpha1 [preface] == Preface diff --git a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc index e49962a58f7..ef91d0b7000 100644 --- a/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc +++ b/docs/java-api/aggregations/metrics/geobounds-aggregation.asciidoc @@ -1,5 +1,5 @@ [[java-aggs-metrics-geobounds]] -==== Cardinality Aggregation +==== Geo Bounds Aggregation Here is how you can use {ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds Aggregation] diff --git a/docs/java-api/search.asciidoc b/docs/java-api/search.asciidoc index 8fd47762ade..2da24e93c22 100644 --- a/docs/java-api/search.asciidoc +++ b/docs/java-api/search.asciidoc @@ -58,7 +58,7 @@ SearchResponse scrollResp = client.prepareSearch(test) .addSort(FieldSortBuilder.DOC_FIELD_NAME, SortOrder.ASC) .setScroll(new TimeValue(60000)) .setQuery(qb) - .setSize(100).execute().actionGet(); //100 hits per shard will be returned for each scroll + .setSize(100).execute().actionGet(); //max of 100 hits will be returned for each scroll //Scroll until no hits are returned do { for (SearchHit hit : scrollResp.getHits().getHits()) { @@ -68,10 +68,6 @@ do { scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet(); } while(scrollResp.getHits().getHits().length != 0); // Zero hits mark the end of the scroll and the while loop. -------------------------------------------------- -[NOTE] -==== -The size-parameter is per shard, so if you run a query against multiple indices (leading to many shards being involved in the query) the result might be more documents per execution of the scroll than you would expect!
-==== [[java-search-msearch]] === MultiSearch API @@ -81,12 +77,12 @@ documentation [source,java] -------------------------------------------------- -SearchRequestBuilder srb1 = node.client() +SearchRequestBuilder srb1 = client .prepareSearch().setQuery(QueryBuilders.queryStringQuery("elasticsearch")).setSize(1); -SearchRequestBuilder srb2 = node.client() +SearchRequestBuilder srb2 = client .prepareSearch().setQuery(QueryBuilders.matchQuery("name", "kimchy")).setSize(1); -MultiSearchResponse sr = node.client().prepareMultiSearch() +MultiSearchResponse sr = client.prepareMultiSearch() .add(srb1) .add(srb2) .execute().actionGet(); @@ -107,7 +103,7 @@ The following code shows how to add two aggregations within your search: [source,java] -------------------------------------------------- -SearchResponse sr = node.client().prepareSearch() +SearchResponse sr = client.prepareSearch() .setQuery(QueryBuilders.matchAllQuery()) .addAggregation( AggregationBuilders.terms("agg1").field("field") diff --git a/docs/java-rest/usage.asciidoc b/docs/java-rest/usage.asciidoc index 55eac8e4668..7ca8456d7d1 100644 --- a/docs/java-rest/usage.asciidoc +++ b/docs/java-rest/usage.asciidoc @@ -51,8 +51,8 @@ https://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/org/apache/http/Ht [source,java] -------------------------------------------------- RestClient restClient = RestClient.builder( - new HttpHost("http", "localhost", 9200), - new HttpHost("http", "localhost", 9201)).build(); + new HttpHost("localhost", 9200, "http"), + new HttpHost("localhost", 9201, "http")).build(); -------------------------------------------------- The `RestClient` class is thread-safe and ideally has the same lifecycle as @@ -117,7 +117,7 @@ Response performRequest(String method, String endpoint, Response performRequest(String method, String endpoint, Map params, HttpEntity entity, - HttpAsyncResponseConsumer responseConsumer, + HttpAsyncResponseConsumerFactory responseConsumerFactory, Header... headers) throws IOException; @@ -141,7 +141,7 @@ void performRequestAsync(String method, String endpoint, Map params, HttpEntity entity, ResponseListener responseListener, - HttpAsyncResponseConsumer responseConsumer, + HttpAsyncResponseConsumerFactory responseConsumerFactory, Header... headers); -------------------------------------------------- @@ -155,11 +155,12 @@ call (e.g. `/_cluster/health`) `params`:: the optional parameters to be sent as querystring parameters `entity`:: the optional request body enclosed in an `org.apache.http.HttpEntity` object -`responseConsumer`:: the optional +`responseConsumerFactory`:: the optional factory that is used to create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`] - callback. Controls how the response body gets streamed from a non-blocking -HTTP connection on the client side. When not provided, the default -implementation is used which buffers the whole response body in heap memory + callback instance per request attempt. Controls how the response body gets + streamed from a non-blocking HTTP connection on the client side. 
When not + provided, the default implementation is used which buffers the whole response + body in heap memory, up to 100 MB `responseListener`:: the listener to be notified upon asynchronous request success or failure `headers`:: optional request headers diff --git a/docs/plugins/alerting.asciidoc b/docs/plugins/alerting.asciidoc index 9472dbb6382..1e365306a84 100644 --- a/docs/plugins/alerting.asciidoc +++ b/docs/plugins/alerting.asciidoc @@ -8,11 +8,10 @@ Alerting plugins allow Elasticsearch to monitor indices and to trigger alerts wh The core alerting plugins are: -link:/products/watcher[Watcher]:: +link:/products/x-pack/alerting[X-Pack]:: -Watcher is the alerting and notification product for Elasticsearch that lets -you take action based on changes in your data. It is designed around the +X-Pack contains the alerting and notification product for Elasticsearch that +lets you take action based on changes in your data. It is designed around the principle that if you can query something in Elasticsearch, you can alert on it. Simply define a query, condition, schedule, and the actions to take, and -Watcher will do the rest. - +X-Pack will do the rest. diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index 1677634bb56..e2d65c95688 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -7,6 +7,23 @@ libraries, including better analysis of Asian languages, Unicode normalization, Unicode-aware case folding, collation support, and transliteration. +[IMPORTANT] +.ICU analysis and backwards compatibility +================================================ + +From time to time, the ICU library receives updates such as adding new +characters and emojis, and improving collation (sort) orders. These changes +may or may not affect search and sort orders, depending on which character +sets you are using. + +While we restrict ICU upgrades to major versions, you may find that an index +created in the previous major version will need to be reindexed in order to +return correct (and correctly ordered) results, and to take advantage of new +characters. + +================================================ + + [[analysis-icu-install]] [float] ==== Installation @@ -22,7 +39,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/analysis-icu/{version}/analysis-icu-{version}.zip. +{plugin_url}/analysis-icu/analysis-icu-{version}.zip. [[analysis-icu-remove]] [float] @@ -164,7 +181,11 @@ PUT icu_sample } } -POST icu_sample/_analyze?analyzer=my_analyzer&text=Elasticsearch. Wow! +GET icu_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "Elasticsearch. Wow!"
+} -------------------------------------------------- // CONSOLE @@ -480,18 +501,21 @@ PUT icu_sample } } -GET icu_sample/_analyze?analyzer=latin +GET icu_sample/_analyze { + "analyzer": "latin", "text": "你好" <2> } -GET icu_sample/_analyze?analyzer=latin +GET icu_sample/_analyze { + "analyzer": "latin", "text": "здравствуйте" <3> } -GET icu_sample/_analyze?analyzer=latin +GET icu_sample/_analyze { + "analyzer": "latin", "text": "こんにちは" <4> } diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 6b3dc0a72f1..69907b9812d 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -19,7 +19,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/analysis-kuromoji/{version}/analysis-kuromoji-{version}.zip. +{plugin_url}/analysis-kuromoji/analysis-kuromoji-{version}.zip. [[analysis-kuromoji-remove]] [float] @@ -175,7 +175,11 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=東京スカイツリー +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "東京スカイツリー" +} -------------------------------------------------- // CONSOLE @@ -228,7 +232,11 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=飲み +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "飲み" +} -------------------------------------------------- // CONSOLE @@ -290,7 +298,11 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=寿司がおいしいね +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "寿司がおいしいね" +} -------------------------------------------------- // CONSOLE @@ -363,9 +375,17 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=katakana_analyzer&text=寿司 <1> +GET kuromoji_sample/_analyze +{ + "analyzer": "katakana_analyzer", + "text": "寿司" <1> +} -POST kuromoji_sample/_analyze?analyzer=romaji_analyzer&text=寿司 <2> +GET kuromoji_sample/_analyze +{ + "analyzer": "romaji_analyzer", + "text": "寿司" <2> +} -------------------------------------------------- // CONSOLE @@ -413,9 +433,17 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=コピー <1> +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "コピー" <1> +} -POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=サーバー <2> +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "サーバー" <2> +} -------------------------------------------------- // CONSOLE @@ -424,7 +452,7 @@ POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=サーバー <2> [[analysis-kuromoji-stop]] -===== `ja_stop` token filter +==== `ja_stop` token filter The `ja_stop` token filter filters out Japanese stopwords (`_japanese_`), and any other custom stopwords specified by the user. This filter only supports @@ -461,7 +489,11 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=analyzer_with_ja_stop&text=ストップは消える +GET kuromoji_sample/_analyze +{ + "analyzer": "analyzer_with_ja_stop", + "text": "ストップは消える" +} -------------------------------------------------- // CONSOLE @@ -482,7 +514,7 @@ The above request returns: // TESTRESPONSE [[analysis-kuromoji-number]] -===== `kuromoji_number` token filter +==== `kuromoji_number` token filter The `kuromoji_number` token filter normalizes Japanese numbers (kansūji) to regular Arabic decimal numbers in half-width characters. 
For example: @@ -507,7 +539,11 @@ PUT kuromoji_sample } } -POST kuromoji_sample/_analyze?analyzer=my_analyzer&text=一〇〇〇 +GET kuromoji_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "一〇〇〇" +} -------------------------------------------------- // CONSOLE diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc index 0544900a8ca..fb0f6832d4b 100644 --- a/docs/plugins/analysis-phonetic.asciidoc +++ b/docs/plugins/analysis-phonetic.asciidoc @@ -20,7 +20,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/analysis-phonetic/{version}/analysis-phonetic-{version}.zip. +{plugin_url}/analysis-phonetic/analysis-phonetic-{version}.zip. [[analysis-phonetic-remove]] [float] @@ -82,7 +82,11 @@ PUT phonetic_sample } } -POST phonetic_sample/_analyze?analyzer=my_analyzer&text=Joe Bloggs <1> +GET phonetic_sample/_analyze +{ + "analyzer": "my_analyzer", + "text": "Joe Bloggs" <1> +} -------------------------------------------------- // CONSOLE diff --git a/docs/plugins/analysis-smartcn.asciidoc b/docs/plugins/analysis-smartcn.asciidoc index 5d1c13ef043..22953c590a4 100644 --- a/docs/plugins/analysis-smartcn.asciidoc +++ b/docs/plugins/analysis-smartcn.asciidoc @@ -25,7 +25,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/analysis-smartcn/{version}/analysis-smartcn-{version}.zip. +{plugin_url}/analysis-smartcn/analysis-smartcn-{version}.zip. [[analysis-smartcn-remove]] [float] diff --git a/docs/plugins/analysis-stempel.asciidoc b/docs/plugins/analysis-stempel.asciidoc index d2502521b06..ae72c80c92e 100644 --- a/docs/plugins/analysis-stempel.asciidoc +++ b/docs/plugins/analysis-stempel.asciidoc @@ -22,7 +22,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/analysis-stempel/{version}/analysis-stempel-{version}.zip. +{plugin_url}/analysis-stempel/analysis-stempel-{version}.zip. [[analysis-stempel-remove]] [float] diff --git a/docs/plugins/analysis-ukrainian.asciidoc b/docs/plugins/analysis-ukrainian.asciidoc new file mode 100644 index 00000000000..78f8232f1c1 --- /dev/null +++ b/docs/plugins/analysis-ukrainian.asciidoc @@ -0,0 +1,42 @@ +[[analysis-ukrainian]] +=== Ukrainian Analysis Plugin + +The Ukrainian Analysis plugin integrates Lucene's UkrainianMorfologikAnalyzer into elasticsearch. + +It provides stemming for Ukrainian using the http://github.com/morfologik/morfologik-stemming[Morfologik project]. + +[[analysis-ukrainian-install]] +[float] +==== Installation + +This plugin can be installed using the plugin manager: + +[source,sh] +---------------------------------------------------------------- +sudo bin/elasticsearch-plugin install analysis-ukrainian +---------------------------------------------------------------- + +The plugin must be installed on every node in the cluster, and each node must +be restarted after installation. + +This plugin can be downloaded for <> from +{plugin_url}/analysis-ukrainian/analysis-ukrainian-{version}.zip. 
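The analysis-plugin hunks above and below all make the same mechanical edit: `_analyze` examples move their parameters out of the URL query string and into a JSON request body. A minimal curl sketch of the new form, assuming a node on `localhost:9200` and the `icu_sample` index from the ICU examples:

[source,sh]
----
# Old form (removed): POST icu_sample/_analyze?analyzer=my_analyzer&text=...
# New form: the analyzer and text travel in the request body instead.
curl -XGET 'http://localhost:9200/icu_sample/_analyze' -d '
{
  "analyzer": "my_analyzer",
  "text": "Elasticsearch. Wow!"
}'
----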
+ +[[analysis-ukrainian-remove]] +[float] +==== Removal + +The plugin can be removed with the following command: + +[source,sh] +---------------------------------------------------------------- +sudo bin/elasticsearch-plugin remove analysis-ukrainian +---------------------------------------------------------------- + +The node must be stopped before removing the plugin. + +[[analysis-ukrainian-analyzer]] +[float] +==== `ukrainian` analyzer + +The plugin provides the `ukrainian` analyzer. diff --git a/docs/plugins/analysis.asciidoc b/docs/plugins/analysis.asciidoc index 884dc2aebae..3c3df021de5 100644 --- a/docs/plugins/analysis.asciidoc +++ b/docs/plugins/analysis.asciidoc @@ -36,16 +36,18 @@ segmented into words. Provides high quality stemming for Polish. +<>:: + +Provides stemming for Ukrainian. + [float] ==== Community contributed analysis plugins A number of analysis plugins have been contributed by our community: -* https://github.com/yakaz/elasticsearch-analysis-combo/[Combo Analysis Plugin] (by Olivier Favre, Yakaz) * https://github.com/synhershko/elasticsearch-analysis-hebrew[Hebrew Analysis Plugin] (by Itamar Syn-Hershko) * https://github.com/medcl/elasticsearch-analysis-ik[IK Analysis Plugin] (by Medcl) * https://github.com/medcl/elasticsearch-analysis-mmseg[Mmseg Analysis Plugin] (by Medcl) -* https://github.com/chytreg/elasticsearch-analysis-morfologik[Morfologik (Polish) Analysis plugin] (by chytreg) * https://github.com/imotov/elasticsearch-analysis-morphology[Russian and English Morphological Analysis Plugin] (by Igor Motov) * https://github.com/medcl/elasticsearch-analysis-pinyin[Pinyin Analysis Plugin] (by Medcl) * https://github.com/duydo/elasticsearch-analysis-vietnamese[Vietnamese Analysis Plugin] (by Duy Do) @@ -62,5 +64,4 @@ include::analysis-smartcn.asciidoc[] include::analysis-stempel.asciidoc[] - - +include::analysis-ukrainian.asciidoc[] diff --git a/docs/plugins/api.asciidoc b/docs/plugins/api.asciidoc index 54edcbc7f0e..a2fbc5165ac 100644 --- a/docs/plugins/api.asciidoc +++ b/docs/plugins/api.asciidoc @@ -14,10 +14,6 @@ A number of plugins have been contributed by our community: * https://github.com/wikimedia/search-extra[Elasticsearch Trigram Accelerated Regular Expression Filter]: (by Wikimedia Foundation/Nik Everett) -* https://github.com/kzwang/elasticsearch-image[Elasticsearch Image Plugin]: - Uses https://code.google.com/p/lire/[Lire (Lucene Image Retrieval)] to allow users - to index images and search for similar images (by Kevin Wang) - * https://github.com/wikimedia/search-highlighter[Elasticsearch Experimental Highlighter]: (by Wikimedia Foundation/Nik Everett) @@ -30,7 +26,4 @@ A number of plugins have been contributed by our community: * https://github.com/codelibs/elasticsearch-taste[Elasticsearch Taste Plugin]: Mahout Taste-based Collaborative Filtering implementation (by CodeLibs Project) -* https://github.com/hadashiA/elasticsearch-flavor[Elasticsearch Flavor Plugin] using - http://mahout.apache.org/[Mahout] Collaboration filtering (by hadashiA) * https://github.com/jurgc11/es-change-feed-plugin[WebSocket Change Feed Plugin] (by ForgeRock/Chris Clifton) - diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index 342d05d6faf..f69df7f5171 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -22,7 +22,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. 
This plugin can be downloaded for <> from -{plugin_url}/discovery-azure-classic/{version}/discovery-azure-classic-{version}.zip. +{plugin_url}/discovery-azure-classic/discovery-azure-classic-{version}.zip. [[discovery-azure-classic-remove]] [float] @@ -56,7 +56,7 @@ cloud: type: pkcs12 discovery: - type: azure + zen.hosts_provider: azure ---- [IMPORTANT] diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 8520cf1d16e..4c21f7fcc92 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -20,7 +20,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/discovery-ec2/{version}/discovery-ec2-{version}.zip. +{plugin_url}/discovery-ec2/discovery-ec2-{version}.zip. [[discovery-ec2-remove]] [float] @@ -139,7 +139,7 @@ environments). Here is a simple sample configuration: [source,yaml] ---- discovery: - type: ec2 + zen.hosts_provider: ec2 ---- You must also set `cloud.aws.region` if you are not using the default AWS region. See <> for details. diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc index a848cdd6ff1..15175620d52 100644 --- a/docs/plugins/discovery-file.asciidoc +++ b/docs/plugins/discovery-file.asciidoc @@ -39,11 +39,15 @@ The node must be stopped before removing the plugin. The file-based discovery plugin provides the ability to specify the unicast hosts list through a simple `unicast_hosts.txt` file that can -be dynamically updated at any time. The discovery type for this plugin -is still the default `zen` plugin, so no changes are required to the -`elasticsearch.yml` config file. This plugin simply provides a facility -to supply the unicast hosts list for zen discovery through an external -file that can be updated at any time by a side process. +be dynamically updated at any time. To enable, add the following to `elasticsearch.yml`: + +[source,yaml] +---- +discovery.zen.hosts_provider: file +---- + +This plugin simply provides a facility to supply the unicast hosts list for +zen discovery through an external file that can be updated at any time by a side process. For example, this gives a convenient mechanism for an Elasticsearch instance that is run in docker containers to be dynamically supplied a list of IP @@ -89,5 +93,5 @@ running on the default port: ---------------------------------------------------------------- Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in brackets with the port coming after the brackets. diff --git a/docs/plugins/discovery-gce.asciidoc b/docs/plugins/discovery-gce.asciidoc index fee10a96cb7..1f19a379dc3 100644 --- a/docs/plugins/discovery-gce.asciidoc +++ b/docs/plugins/discovery-gce.asciidoc @@ -18,7 +18,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/discovery-gce/{version}/discovery-gce-{version}.zip. +{plugin_url}/discovery-gce/discovery-gce-{version}.zip.
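For the file-based discovery hunk above, a sketch of what `unicast_hosts.txt` can contain; the config path and addresses here are illustrative, not taken from this patch:

[source,sh]
----
# One entry per line; host names are allowed, and IPv6 addresses must be
# wrapped in brackets with the port coming after the brackets.
cat <<'EOF' > /etc/elasticsearch/discovery-file/unicast_hosts.txt
10.0.0.1
10.0.0.2:9301
seeds.example.com
[2001:db8::1]:9300
EOF
----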
[[discovery-gce-remove]] [float] @@ -46,7 +46,7 @@ cloud: project_id: zone: discovery: - type: gce + zen.hosts_provider: gce -------------------------------------------------- The following gce settings (prefixed with `cloud.gce`) are supported: diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index 96a1c1e7b28..39afbea96dc 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -30,9 +30,7 @@ The File-based discovery plugin allows providing the unicast hosts list through A number of discovery plugins have been contributed by our community: -* https://github.com/grantr/elasticsearch-srv-discovery[DNS SRV Discovery Plugin] (by Grant Rodgers) * https://github.com/shikhar/eskka[eskka Discovery Plugin] (by Shikhar Bhushan) -* https://github.com/grmblfrz/elasticsearch-zookeeper[ZooKeeper Discovery Plugin] (by Sonian Inc.) * https://github.com/fabric8io/elasticsearch-cloud-kubernetes[Kubernetes Discovery Plugin] (by Jimmi Dyson, http://fabric8.io[fabric8]) include::discovery-ec2.asciidoc[] @@ -42,4 +40,3 @@ include::discovery-azure-classic.asciidoc[] include::discovery-gce.asciidoc[] include::discovery-file.asciidoc[] - diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index f969dc49182..ec1954a86a8 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -3,8 +3,8 @@ :ref: https://www.elastic.co/guide/en/elasticsearch/reference/master :guide: https://www.elastic.co/guide :version: 6.0.0-alpha1 -:lucene_version: 6.2.0 -:plugin_url: https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin +:lucene_version: 6.3.0 +:plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins [[intro]] == Introduction to plugins diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index dc9c53412fe..a0a4d70cbcf 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -26,7 +26,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/ingest-attachment/{version}/ingest-attachment-{version}.zip. +{plugin_url}/ingest-attachment/ingest-attachment-{version}.zip. [[ingest-attachment-remove]] [float] diff --git a/docs/plugins/ingest-geoip.asciidoc b/docs/plugins/ingest-geoip.asciidoc index 9b8e6824fb9..0481ad40ab6 100644 --- a/docs/plugins/ingest-geoip.asciidoc +++ b/docs/plugins/ingest-geoip.asciidoc @@ -27,7 +27,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/ingest-geoip/{version}/ingest-geoip-{version}.zip. +{plugin_url}/ingest-geoip/ingest-geoip-{version}.zip. [[ingest-geoip-remove]] [float] diff --git a/docs/plugins/ingest-user-agent.asciidoc b/docs/plugins/ingest-user-agent.asciidoc index fc4d35ebfe8..250051d17d5 100644 --- a/docs/plugins/ingest-user-agent.asciidoc +++ b/docs/plugins/ingest-user-agent.asciidoc @@ -21,7 +21,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/ingest-user-agent/{version}/ingest-user-agent-{version}.zip. +{plugin_url}/ingest-user-agent/ingest-user-agent-{version}.zip. 
[[ingest-user-agent-remove]] [float] diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index d39d02efeac..503f1274d81 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -201,4 +201,3 @@ These projects appear to have been abandoned: D3. * https://github.com/OlegKunitsyn/eslogd[eslogd]: Linux daemon that replicates events to a central Elasticsearch server in realtime ->>>>>>> 02602a3... Update integrations.asciidoc (#18915) diff --git a/docs/plugins/lang-javascript.asciidoc b/docs/plugins/lang-javascript.asciidoc index 0670b0adadd..001b7f22231 100644 --- a/docs/plugins/lang-javascript.asciidoc +++ b/docs/plugins/lang-javascript.asciidoc @@ -22,7 +22,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/lang-javascript/{version}/lang-javascript-{version}.zip. +{plugin_url}/lang-javascript/lang-javascript-{version}.zip. [[lang-javascript-remove]] [float] @@ -120,7 +120,7 @@ GET test/_search "function_score": { "script_score": { "script": { - "id": "my_script", <2> + "stored": "my_script", <2> "lang": "javascript", "params": { "factor": 2 diff --git a/docs/plugins/lang-python.asciidoc b/docs/plugins/lang-python.asciidoc index 55de5525378..0e328d79f60 100644 --- a/docs/plugins/lang-python.asciidoc +++ b/docs/plugins/lang-python.asciidoc @@ -21,7 +21,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/lang-python/{version}/lang-python-{version}.zip. +{plugin_url}/lang-python/lang-python-{version}.zip. [[lang-python-remove]] [float] @@ -119,7 +119,7 @@ GET test/_search "function_score": { "script_score": { "script": { - "id": "my_script", <2> + "stored": "my_script", <2> "lang": "python", "params": { "factor": 2 diff --git a/docs/plugins/management.asciidoc b/docs/plugins/management.asciidoc index b48d29da3ab..243aaf47e33 100644 --- a/docs/plugins/management.asciidoc +++ b/docs/plugins/management.asciidoc @@ -1,19 +1,16 @@ [[management]] -== Management and Site Plugins +== Management Plugins -Management and site plugins offer UIs for managing and interacting with -Elasticsearch. +Management plugins offer UIs for managing and interacting with Elasticsearch. [float] === Core management plugins The core management plugins are: -link:/products/marvel[Marvel]:: +link:/products/x-pack/monitoring[X-Pack]:: -Marvel is a management and monitoring product for Elasticsearch. Marvel +X-Pack contains the management and monitoring features for Elasticsearch. It aggregates cluster wide statistics and events and offers a single interface to -view and analyze them. Marvel is free for development use but requires a -license to run in production. - - +view and analyze them. You can get a link:/subscriptions[free license] for basic +monitoring or a higher level license for more advanced needs. diff --git a/docs/plugins/mapper-murmur3.asciidoc b/docs/plugins/mapper-murmur3.asciidoc index fd9f29ceb26..63623f109e0 100644 --- a/docs/plugins/mapper-murmur3.asciidoc +++ b/docs/plugins/mapper-murmur3.asciidoc @@ -20,7 +20,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/mapper-murmur3/{version}/mapper-murmur3-{version}.zip. +{plugin_url}/mapper-murmur3/mapper-murmur3-{version}.zip. 
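As a sketch of how the `stored` script reference in the `lang-javascript` and `lang-python` hunks above is used in 5.x: the script is first saved through the `_scripts` endpoint and then referenced by name. The exact endpoint shape and the `my_script`/`num1` names below are illustrative assumptions, not part of this patch:

[source,js]
--------------------------------------------------
POST _scripts/javascript/my_script
{
  "script": "doc['num1'].value * factor"
}
--------------------------------------------------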
[[mapper-murmur3-remove]] [float] diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc index 76fdf71a679..8374c09733a 100644 --- a/docs/plugins/mapper-size.asciidoc +++ b/docs/plugins/mapper-size.asciidoc @@ -20,7 +20,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/mapper-size/{version}/mapper-size-{version}.zip. +{plugin_url}/mapper-size/mapper-size-{version}.zip. [[mapper-size-remove]] [float] diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index e8235d75829..d09ccc34905 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -141,25 +141,6 @@ sudo bin/elasticsearch-plugin -Epath.conf=/path/to/custom/config/dir install > from -{plugin_url}/repository-azure/{version}/repository-azure-{version}.zip. +{plugin_url}/repository-azure/repository-azure-{version}.zip. [[repository-azure-remove]] [float] diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index d9ef6f5d2ff..7b222c9a3f7 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -21,7 +21,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/repository-gcs/{version}/repository-gcs-{version}.zip. +{plugin_url}/repository-gcs/repository-gcs-{version}.zip. [[repository-gcs-remove]] [float] diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index ab620b58cb7..f494b8089ff 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -19,7 +19,7 @@ The plugin must be installed on _every_ node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/repository-hdfs/{version}/repository-hdfs-{version}.zip. +{plugin_url}/repository-hdfs/repository-hdfs-{version}.zip. [[repository-hdfs-remove]] [float] diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 0671c84c2b3..46789cf3f6e 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -21,7 +21,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/repository-s3/{version}/repository-s3-{version}.zip. +{plugin_url}/repository-s3/repository-s3-{version}.zip. [[repository-s3-remove]] [float] @@ -217,7 +217,8 @@ The following settings are supported: to split the chunk into several parts, each of `buffer_size` length, and to upload each part in its own request. Note that setting a buffer size lower than `5mb` is not allowed since it will prevent the use of the - Multipart API and may result in upload errors. Defaults to `100mb`. + Multipart API and may result in upload errors. Defaults to the minimum + between `100mb` and `5%` of the heap size. `max_retries`:: diff --git a/docs/plugins/security.asciidoc b/docs/plugins/security.asciidoc index 95ba68a6f05..d113c12bfc2 100644 --- a/docs/plugins/security.asciidoc +++ b/docs/plugins/security.asciidoc @@ -8,12 +8,12 @@ Security plugins add a security layer to Elasticsearch. 
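For context on the `repository-s3` `buffer_size` default change above, a snapshot repository carrying these settings might be registered like this sketch (the bucket name and sizes are illustrative only):

[source,js]
--------------------------------------------------
PUT _snapshot/my_s3_repository
{
  "type": "s3",
  "settings": {
    "bucket": "my-bucket",
    "buffer_size": "100mb",
    "chunk_size": "1gb"
  }
}
--------------------------------------------------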
The core security plugins are: -link:/products/shield[Shield]:: +link:/products/x-pack/security[X-Pack]:: -Shield is the Elastic product that makes it easy for anyone to add -enterprise-grade security to their ELK stack. Designed to address the growing security -needs of thousands of enterprises using ELK today, Shield provides peace of -mind when it comes to protecting your data. +X-Pack is the Elastic product that makes it easy for anyone to add +enterprise-grade security to their Elastic Stack. Designed to address the +growing security needs of thousands of enterprises using the Elastic Stack +today, X-Pack provides peace of mind when it comes to protecting your data. [float] === Community contributed security plugins @@ -25,4 +25,3 @@ The following plugins have been contributed by our community: * https://github.com/sscarduzio/elasticsearch-readonlyrest-plugin[Readonly REST]: High performance access control for Elasticsearch native REST API (by Simone Scarduzio) - diff --git a/docs/plugins/store-smb.asciidoc b/docs/plugins/store-smb.asciidoc index d2995189bd6..80d4547584d 100644 --- a/docs/plugins/store-smb.asciidoc +++ b/docs/plugins/store-smb.asciidoc @@ -18,7 +18,7 @@ The plugin must be installed on every node in the cluster, and each node must be restarted after installation. This plugin can be downloaded for <> from -{plugin_url}/store-smb/{version}/store-smb-{version}.zip. +{plugin_url}/store-smb/store-smb-{version}.zip. [[store-smb-remove]] [float] diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc index ada134238bb..2b6523725b6 100644 --- a/docs/reference/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -58,6 +58,7 @@ The following snippet captures the basic structure of aggregations: [,"" : { ... } ]* } -------------------------------------------------- +// NOTCONSOLE The `aggregations` object (the key `aggs` can also be used) in the JSON holds the aggregations to be computed. Each aggregation is associated with a logical name that the user defines (e.g. if the aggregation computes the average price, then it would diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index e69877d97f2..fa89314a230 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -10,14 +10,18 @@ This aggregation relies on the <> in the map For example, let's say we have an index of questions and answers. The answer type has the following `_parent` field in the mapping: [source,js] -------------------------------------------------- +PUT child_example { - "answer" : { - "_parent" : { - "type" : "question" + "mappings": { + "answer" : { + "_parent" : { + "type" : "question" + } } } } -------------------------------------------------- +// CONSOLE The question typed document contains a tag field and the answer typed documents contain an owner field. With the `children` aggregation the tag buckets can be mapped to the owner buckets in a single request even though the two fields exist in @@ -26,6 +30,7 @@ two different kinds of documents. An example of a question typed document: [source,js] -------------------------------------------------- +PUT child_example/question/1 { "body": "

    I have Windows 2003 server and I bought a new Windows 2008 server...", "title": "What's the best way to file transfer my site from server to a newer one?", @@ -33,33 +38,49 @@ An example of a question typed document: "windows-server-2003", "windows-server-2008", "file-transfer" - ], + ] } -------------------------------------------------- +// CONSOLE +// TEST[continued] -An example of an answer typed document: +Examples of `answer` typed documents: [source,js] -------------------------------------------------- +PUT child_example/answer/1?parent=1&refresh { "owner": { "location": "Norfolk, United Kingdom", "display_name": "Sam", "id": 48 }, - "body": "

    Unfortunately your pretty much limited to FTP...", + "body": "

    Unfortunately you're pretty much limited to FTP...", "creation_date": "2009-05-04T13:45:37.030" } +PUT child_example/answer/2?parent=1&refresh +{ + "owner": { + "location": "Norfolk, United Kingdom", + "display_name": "Troll", + "id": 49 + }, + "body": "

    Use Linux...", + "creation_date": "2009-05-05T13:45:37.030" +} -------------------------------------------------- +// CONSOLE +// TEST[continued] The following request can be built that connects the two together: [source,js] -------------------------------------------------- +POST child_example/_search?size=0 { "aggs": { "top-tags": { "terms": { - "field": "tags", + "field": "tags.keyword", "size": 10 }, "aggs": { @@ -70,7 +91,7 @@ The following request can be built that connects the two together: "aggs": { "top-names": { "terms": { - "field": "owner.display_name", + "field": "owner.display_name.keyword", "size": 10 } } @@ -81,6 +102,8 @@ The following request can be built that connects the two together: } } -------------------------------------------------- +// CONSOLE +// TEST[continued] <1> The `type` points to type / mapping with the name `answer`. @@ -91,245 +114,74 @@ Possible response: [source,js] -------------------------------------------------- { + "timed_out": false, + "took": 25, + "_shards": { "total": 5, "successful": 5, "failed": 0 }, + "hits": { "total": 3, "max_score": 0.0, hits: [] }, "aggregations": { "top-tags": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, "buckets": [ { - "key": "windows-server-2003", - "doc_count": 25365, <1> + "key": "file-transfer", + "doc_count": 1, <1> "to-answers": { - "doc_count": 36004, <2> + "doc_count": 2, <2> "top-names": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, "buckets": [ { "key": "Sam", - "doc_count": 274 + "doc_count": 1 }, { - "key": "chris", - "doc_count": 19 - }, - { - "key": "david", - "doc_count": 14 - }, - ... + "key": "Troll", + "doc_count": 1 + } ] } } }, { - "key": "linux", - "doc_count": 18342, + "key": "windows-server-2003", + "doc_count": 1, <1> "to-answers": { - "doc_count": 6655, + "doc_count": 2, <2> "top-names": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, "buckets": [ { - "key": "abrams", - "doc_count": 25 + "key": "Sam", + "doc_count": 1 }, { - "key": "ignacio", - "doc_count": 25 - }, - { - "key": "vazquez", - "doc_count": 25 - }, - ... + "key": "Troll", + "doc_count": 1 + } ] } } }, { - "key": "windows", - "doc_count": 18119, + "key": "windows-server-2008", + "doc_count": 1, <1> "to-answers": { - "doc_count": 24051, + "doc_count": 2, <2> "top-names": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, "buckets": [ { - "key": "molly7244", - "doc_count": 265 + "key": "Sam", + "doc_count": 1 }, { - "key": "david", - "doc_count": 27 - }, - { - "key": "chris", - "doc_count": 26 - }, - ... - ] - } - } - }, - { - "key": "osx", - "doc_count": 10971, - "to-answers": { - "doc_count": 5902, - "top-names": { - "buckets": [ - { - "key": "diago", - "doc_count": 4 - }, - { - "key": "albert", - "doc_count": 3 - }, - { - "key": "asmus", - "doc_count": 3 - }, - ... - ] - } - } - }, - { - "key": "ubuntu", - "doc_count": 8743, - "to-answers": { - "doc_count": 8784, - "top-names": { - "buckets": [ - { - "key": "ignacio", - "doc_count": 9 - }, - { - "key": "abrams", - "doc_count": 8 - }, - { - "key": "molly7244", - "doc_count": 8 - }, - ... - ] - } - } - }, - { - "key": "windows-xp", - "doc_count": 7517, - "to-answers": { - "doc_count": 13610, - "top-names": { - "buckets": [ - { - "key": "molly7244", - "doc_count": 232 - }, - { - "key": "chris", - "doc_count": 9 - }, - { - "key": "john", - "doc_count": 9 - }, - ... 
- ] - } - } - }, - { - "key": "networking", - "doc_count": 6739, - "to-answers": { - "doc_count": 2076, - "top-names": { - "buckets": [ - { - "key": "molly7244", - "doc_count": 6 - }, - { - "key": "alnitak", - "doc_count": 5 - }, - { - "key": "chris", - "doc_count": 3 - }, - ... - ] - } - } - }, - { - "key": "mac", - "doc_count": 5590, - "to-answers": { - "doc_count": 999, - "top-names": { - "buckets": [ - { - "key": "abrams", - "doc_count": 2 - }, - { - "key": "ignacio", - "doc_count": 2 - }, - { - "key": "vazquez", - "doc_count": 2 - }, - ... - ] - } - } - }, - { - "key": "wireless-networking", - "doc_count": 4409, - "to-answers": { - "doc_count": 6497, - "top-names": { - "buckets": [ - { - "key": "molly7244", - "doc_count": 61 - }, - { - "key": "chris", - "doc_count": 5 - }, - { - "key": "mike", - "doc_count": 5 - }, - ... - ] - } - } - }, - { - "key": "windows-8", - "doc_count": 3601, - "to-answers": { - "doc_count": 4263, - "top-names": { - "buckets": [ - { - "key": "molly7244", - "doc_count": 3 - }, - { - "key": "msft", - "doc_count": 2 - }, - { - "key": "user172132", - "doc_count": 2 - }, - ... + "key": "Troll", + "doc_count": 1 + } ] } } @@ -339,6 +191,7 @@ Possible response: } } -------------------------------------------------- +// TESTRESPONSE[s/"took": 25/"took": $body.took/] -<1> The number of question documents with the tag `windows-server-2003`. -<2> The number of answer documents that are related to question documents with the tag `windows-server-2003`. +<1> The number of question documents with the tag `file-transfer`, `windows-server-2003`, etc. +<2> The number of answer documents that are related to question documents with the tag `file-transfer`, `windows-server-2003`, etc. diff --git a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc index 70412d2680a..f5a45da976b 100644 --- a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc @@ -1,5 +1,5 @@ [[search-aggregations-bucket-diversified-sampler-aggregation]] -=== Sampler Aggregation +=== Diversified Sampler Aggregation experimental[] @@ -24,7 +24,7 @@ Example: }, "aggs": { "sample": { - "sampler": { + "diversified_sampler": { "shard_size": 200, "field" : "user.id" }, @@ -63,7 +63,7 @@ Response: } -------------------------------------------------- -<1> 1000 documents were sampled in total becase we asked for a maximum of 200 from an index with 5 shards. The cost of performing the nested significant_terms aggregation was therefore limited rather than unbounded. +<1> 1000 documents were sampled in total because we asked for a maximum of 200 from an index with 5 shards. The cost of performing the nested significant_terms aggregation was therefore limited rather than unbounded. <2> The results of the significant_terms aggregation are not skewed by any single over-active Twitter user because we asked for a maximum of one tweet from any one user in our sample. 
@@ -92,7 +92,7 @@ Controlling diversity using a field: { "aggs" : { "sample" : { - "diverisfied_sampler" : { + "diversified_sampler" : { "field" : "author", "max_docs_per_value" : 3 } diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 68b2e8511f9..fb3baca0967 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -499,7 +499,7 @@ TIP: for indexed scripts replace the `file` parameter with an `id` parameter. "aggs" : { "genres" : { "terms" : { - "field" : "gendre", + "field" : "gender", "script" : { "inline" : "'Genre: ' +_value", "lang" : "painless" diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index ba9899f9d68..3c3e9b8d13d 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -268,6 +268,4 @@ params:: Optional. An object whose contents will be passed as variable "_agg" : {} } -------------------------------------------------- -reduce_params:: Optional. An object whose contents will be passed as variables to the `reduce_script`. This can be useful to allow the user to control - the behavior of the reduce phase. If this is not specified the variable will be undefined in the reduce_script execution. diff --git a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc index fcb29104d67..252f535fd54 100644 --- a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc @@ -65,7 +65,7 @@ POST /sales/_search <1> `buckets_path` instructs this percentiles_bucket aggregation that we want to calculate percentiles for the `sales` aggregation in the `sales_per_month` date histogram. -<2> `percents` specifies which percentiles we wish to calculate, in this case, the 25th, 50th and 75th percentil +<2> `percents` specifies which percentiles we wish to calculate, in this case, the 25th, 50th and 75th percentiles. And the following may be the response: @@ -107,7 +107,7 @@ And the following may be the response: }, "percentiles_monthly_sales": { "values" : { - "25.0": 60.0, + "25.0": 375.0, "50.0": 375.0, "75.0": 550.0 } diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 7504be927d9..7d059253e70 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -169,11 +169,6 @@ The `pattern` analyzer accepts the following parameters: Should terms be lowercased or not. Defaults to `true`. -`max_token_length`:: - - The maximum token length. If a token is seen that exceeds this length then - it is split at `max_token_length` intervals. Defaults to `255`. 
- `stopwords`:: A pre-defined stop words list like `_english_` or an array containing a diff --git a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc index 5aad28b4394..b175267296d 100644 --- a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc @@ -2,7 +2,7 @@ === Lowercase Tokenizer -The `lowercase` toknenizer, like the +The `lowercase` tokenizer, like the <<analysis-letter-tokenizer,`letter` tokenizer>> breaks text into terms whenever it encounters a character which is not a letter, but it also lowercases all terms. It is functionally equivalent to the diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index d361f3d07d1..119bcdfd95c 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -84,11 +84,13 @@ Where: `date_format`:: is the optional format in which the computed date should be rendered. Defaults to `YYYY.MM.dd`. `time_zone`:: is the optional time zone. Defaults to `utc`. -You must enclose date math index name expressions within angle brackets. For example: +You must enclose date math index name expressions within angle brackets, and +all special characters should be URI encoded. For example: [source,js] ---------------------------------------------------------------------- -GET /<logstash-{now/d}>/_search +# GET /<logstash-{now/d}>/_search +GET /%3Clogstash-%7Bnow%2Fd%7D%3E/_search { "query" : { "match": { @@ -99,9 +101,23 @@ GET /<logstash-{now/d}>/_search ---------------------------------------------------------------------- // CONSOLE // TEST[s/^/PUT logstash-2016.09.20\n/] -// TEST[s/\{now/{2016.09.20||/] +// TEST[s/now/2016.09.20||/] -NOTE: The `/` used for date rounding must be url encoded as `%2F` in any url. +[NOTE] .Percent encoding of date math characters ====================================================== The special characters used for date rounding must be URI encoded as follows: [horizontal] `<`:: `%3C` `>`:: `%3E` `/`:: `%2F` `{`:: `%7B` `}`:: `%7D` `|`:: `%7C` `+`:: `%2B` `:`:: `%3A` ====================================================== The following example shows different forms of date math index names and the final index names they resolve to given the current time is 22nd March 2024 noon utc. @@ -127,7 +143,8 @@ three days, assuming the indices use the default Logstash index name format, [source,js] ---------------------------------------------------------------------- -GET /<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>/_search +# GET /<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>/_search +GET /%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E/_search { "query" : { "match": { @@ -138,7 +155,7 @@ GET /<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>/_search ---------------------------------------------------------------------- // CONSOLE // TEST[s/^/PUT logstash-2016.09.20\nPUT logstash-2016.09.19\nPUT logstash-2016.09.18\n/] -// TEST[s/\{now/{2016.09.20||/] +// TEST[s/now/2016.09.20||/] [[common-options]] == Common options @@ -205,6 +222,7 @@ Some examples are: `2015-01-01||+1M/d`:: `2015-01-01` plus one month, rounded down to the nearest day. [float] +[[common-options-response-filtering]] === Response Filtering All REST APIs accept a `filter_path` parameter that can be used to reduce @@ -399,7 +417,8 @@ Returns: "index.number_of_shards": "1", "index.creation_date": "1474389951325", "index.uuid": "n6gzFZTgS664GUfx0Xrpjw", - "index.version.created": ... 
+ "index.version.created": ..., + "index.provided_name" : "twitter" } } } @@ -432,7 +451,8 @@ Returns: "uuid": "n6gzFZTgS664GUfx0Xrpjw", "version": { "created": ... - } + }, + "provided_name" : "twitter" } } } diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index f29bc9badd2..e037907435d 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -25,46 +25,67 @@ the available commands. === Verbose Each of the commands accepts a query string parameter `v` to turn on -verbose output. +verbose output. For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/master?v' -id ip node -EGtKWZlWQYWDmX29fUnp3Q 127.0.0.1 Grey, Sara +GET /_cat/master?v -------------------------------------------------- +// CONSOLE + +Might respond with: + +[source,js] +-------------------------------------------------- +id host ip node +u_n93zwxThWHi1PDBJAGAg 127.0.0.1 127.0.0.1 u_n93zw +-------------------------------------------------- +// TESTRESPONSE[s/u_n93zw(xThWHi1PDBJAGAg)?/.+/ _cat] [float] [[help]] === Help Each of the commands accepts a query string parameter `help` which will -output its available columns. +output its available columns. For example: -[source,sh] -------------------------------------------------- -% curl 'localhost:9200/_cat/master?help' -id | node id -ip | node transport ip address -node | node name +GET /_cat/master?help -------------------------------------------------- +// CONSOLE + +Might respond with: + +[source,js] +-------------------------------------------------- +id | | node id +host | h | host name +ip | | ip address +node | n | node name +-------------------------------------------------- +// TESTRESPONSE[s/[|]/[|]/ _cat] [float] [[headers]] === Headers Each of the commands accepts a query string parameter `h` which forces -only those columns to appear. +only those columns to appear. For example: -[source,sh] -------------------------------------------------- -% curl 'n1:9200/_cat/nodes?h=ip,port,heapPercent,name' -192.168.56.40 9300 40.3 bGG90GE -192.168.56.20 9300 15.3 H5dfFeA -192.168.56.50 9300 17.0 I8hydUG -192.168.56.10 9300 12.3 DKDM97B -192.168.56.30 9300 43.9 6-bjhwl +GET /_cat/nodes?h=ip,port,heapPercent,name -------------------------------------------------- +// CONSOLE + +Responds with: + +[source,js] +-------------------------------------------------- +127.0.0.1 9300 27 sLBaIGK +-------------------------------------------------- +// TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ _cat] You can also request multiple columns using simple wildcards like `/_cat/thread_pool?h=ip,bulk.*` to get all headers (or aliases) starting @@ -94,6 +115,7 @@ green wiki2 3 0 10000 0 105274918 105274918 green wiki1 3 0 10000 413 103776272 103776272 green foo 1 0 227 0 2065131 2065131 -------------------------------------------------- +// NOTCONSOLE If you want to change the <>, use `time` parameter. @@ -106,7 +128,7 @@ If you want to change the <>, use `bytes` parameter. [source,sh] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/indices?format=json' | jq . +% curl 'localhost:9200/_cat/indices?format=json&pretty' [ { "pri.store.size": "650b",
} ] -------------------------------------------------- +// NOTCONSOLE Currently supported formats (for the `?format=` parameter): - text (default) @@ -135,7 +158,7 @@ For example: [source,sh] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/indices' -H "Accept: application/json" | jq . +% curl '192.168.56.10:9200/_cat/indices?pretty' -H "Accept: application/json" [ { "pri.store.size": "650b", @@ -150,6 +173,37 @@ For example: } ] -------------------------------------------------- +// NOTCONSOLE + +[float] +[[sort]] +=== Sort + +Each of the commands accepts a query string parameter `s` which sorts the table by +the columns specified as the parameter value. Columns are specified either by name or by +alias, and are provided as a comma separated string. By default, sorting is done in +ascending fashion. Appending `:desc` to a column will invert the ordering for +that column. `:asc` is also accepted but exhibits the same behavior as the default sort order. + +For example, with a sort string `s=column1,column2:desc,column3`, the table will be +sorted in ascending order by column1, in descending order by column2, and in ascending +order by column3. + +[source,sh] +-------------------------------------------------- +GET _cat/templates?v&s=order:desc,template +-------------------------------------------------- +//CONSOLE + +returns: + +[source,sh] +-------------------------------------------------- +name template order version +pizza_pepperoni *pepperoni* 2 +sushi_california_roll *avocado* 1 1 +pizza_hawaiian *pineapples* 1 +-------------------------------------------------- -- diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index a790b3d15cd..4c176499a2f 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -4,19 +4,55 @@ `aliases` shows information about currently configured aliases to indices including filter and routing infos. -[source,sh] +//// +Hidden setup for example: +[source,js] +-------------------------------------------------- +PUT test1 +{ + "aliases": { + "alias1": {}, + "alias2": { + "filter": { + "match": { + "user": "kimchy" + } + } + }, + "alias3": { + "routing": "1" + }, + "alias4": { + "index_routing": "2", + "search_routing": "1,2" + } + } +} +-------------------------------------------------- +// CONSOLE +//// + +[source,js] +-------------------------------------------------- +GET /_cat/aliases?v +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Might respond with: + +[source,txt] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/aliases?v' alias index filter routing.index routing.search -alias2 test1 * - - -alias4 test1 - 2 1,2 alias1 test1 - - - +alias2 test1 * - - alias3 test1 - 1 1 +alias4 test1 - 2 1,2 -------------------------------------------------- +// TESTRESPONSE[s/[*]/[*]/ _cat] The output shows that `alias` has configured a filter, and specific routing configurations in `alias3` and `alias4`. If you only want to get information about a single alias, you can specify the alias in the URL, for example `/_cat/aliases/alias1`. - diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 7a0236b8495..ba702080e58 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -4,14 +4,22 @@ `allocation` provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. 
-[source,sh] +[source,js] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/allocation?v' -shards disk.indices disk.used disk.avail disk.total disk.percent host ip node - 1 3.1gb 5.6gb 72.2gb 77.8gb 7.8 192.168.56.10 192.168.56.10 bGG90GE - 1 3.1gb 5.6gb 72.2gb 77.8gb 7.8 192.168.56.30 192.168.56.30 I8hydUG - 1 3.0gb 5.5gb 72.3gb 77.8gb 7.6 192.168.56.20 192.168.56.20 H5dfFeA +GET /_cat/allocation?v -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT test\n{"settings": {"number_of_replicas": 0}}\n/] + +Might respond with: + +[source,txt] +-------------------------------------------------- +shards disk.indices disk.used disk.avail disk.total disk.percent host ip node + 5 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 +-------------------------------------------------- +// TESTRESPONSE[s/260b/\\d+b/ s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] +// TESTRESPONSE[s/CSUXak2/.+/ _cat] Here we can see that each node has been allocated a single shard and that they're all using about the same amount of space. diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index ff64f2bc73b..28dc39adc8d 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -4,17 +4,38 @@ `count` provides quick access to the document count of the entire cluster, or individual indices. -[source,sh] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/indices -green wiki1 3 0 10000 331 168.5mb 168.5mb -green wiki2 3 0 428 0 8mb 8mb - -% curl 192.168.56.10:9200/_cat/count -1384314124582 19:42:04 10428 - -% curl 192.168.56.10:9200/_cat/count/wiki2 -1384314139815 19:42:19 428 +GET /_cat/count?v -------------------------------------------------- +// CONSOLE +// TEST[setup:big_twitter] +// TEST[s/^/POST test\/test\?refresh\n{"test": "test"}\n/] + +Looks like: + +[source,txt] +-------------------------------------------------- +epoch timestamp count +1475868259 15:24:19 121 +-------------------------------------------------- +// TESTRESPONSE[s/1475868259 15:24:19/\\d+ \\d+:\\d+:\\d+/ _cat] + +Or for a single index: + +[source,js] +-------------------------------------------------- +GET /_cat/count/twitter?v +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,txt] +-------------------------------------------------- +epoch timestamp count +1475868259 15:24:20 120 +-------------------------------------------------- +// TESTRESPONSE[s/1475868259 15:24:20/\\d+ \\d+:\\d+:\\d+/ _cat] + NOTE: The document count indicates the number of live documents and does not include deleted documents which have not yet been cleaned up by the merge process. diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index 86b14c65f80..d6d7d872dc4 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -1,39 +1,98 @@ [[cat-fielddata]] == cat fielddata -`fielddata` shows how much heap memory is currently being used by fielddata +`fielddata` shows how much heap memory is currently being used by fielddata on every data node in the cluster. 
-[source,sh] + +//// +Hidden setup snippet to build an index with fielddata so our results are real: +[source,js] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/fielddata?v' -id host ip node field size -bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE body 159.8kb -bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE text 225.7kb -H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA body 159.8kb -H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA text 275.3kb -I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG body 109.2kb -I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG text 175.3kb +PUT test +{ + "mappings": { + "test": { + "properties": { + "body": { + "type": "text", + "fielddata":true + }, + "soul": { + "type": "text", + "fielddata":true + } + } + } + } +} +POST test/test?refresh +{ + "body": "some words so there is a little field data", + "soul": "some more words" +} + +# Perform a search to load the field data +POST test/_search?sort=body,soul -------------------------------------------------- +// CONSOLE +//// + +[source,js] +-------------------------------------------------- +GET /_cat/fielddata?v +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Looks like: + +[source,txt] +-------------------------------------------------- +id host ip node field size +Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b +Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul 480b +-------------------------------------------------- +// TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ _cat] Fields can be specified either as a query parameter, or in the URL path: -[source,sh] +[source,js] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/fielddata?v&fields=body' -id host ip node field size -bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE body 159.8kb -H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA body 159.8kb -I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG body 109.2kb +GET /_cat/fielddata?v&fields=body +-------------------------------------------------- +// CONSOLE +// TEST[continued] -% curl '192.168.56.10:9200/_cat/fielddata/body,text?v' -id host ip node field size -bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE body 159.8kb -bGG90GEiSGeezlbrcugAYQ myhost1 10.20.100.200 bGG90GE text 225.7kb -H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA body 159.8kb -H5dfFeANQaCL6xC8VxjAwg myhost2 10.20.100.201 H5dfFeA text 275.3kb -I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG body 109.2kb -I8hydUG3R0q1AJ-HUEvkSQ myhost3 10.20.100.202 I8hydUG text 175.3kb +Which looks like: + +[source,txt] -------------------------------------------------- +id host ip node field size +Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b +-------------------------------------------------- +// TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ _cat] + +And it accepts a comma delimited list: + +[source,js] +-------------------------------------------------- +GET /_cat/fielddata/body,soul?v +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which produces the same output as the first snippet: + +[source,txt] +-------------------------------------------------- +id host ip node field size +Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b +Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul 480b 
+-------------------------------------------------- +// TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ _cat] The output shows the individual fielddata for the `body` and `soul` fields, one row per field per node. diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index bf9b3f17443..cca24c66a36 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -2,17 +2,39 @@ == cat health `health` is a terse, one-line representation of the same information -from `/_cluster/health`. It has one option `ts` to disable the -timestamping. +from `/_cluster/health`. -[source,sh] +[source,js] -------------------------------------------------- -% curl localhost:9200/_cat/health -1384308967 18:16:07 foo green 3 3 3 3 0 0 0 -% curl 'localhost:9200/_cat/health?v&ts=0' -cluster status nodeTotal nodeData shards pri relo init unassign tasks -foo green 3 3 3 3 0 0 0 0 +GET /_cat/health?v -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT twitter\n{"settings":{"number_of_replicas": 0}}\n/] + +[source,txt] +-------------------------------------------------- +epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent +1475871424 16:17:04 elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +-------------------------------------------------- +// TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/ s/elasticsearch/[^ ]+/ s/0 -/\\d+ -/ _cat] + +It has one option `ts` to disable the timestamping: + +[source,js] +-------------------------------------------------- +GET /_cat/health?v&ts=0 +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT twitter\n{"settings":{"number_of_replicas": 0}}\n/] + +which looks like: + +[source,txt] +-------------------------------------------------- +cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent +elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +-------------------------------------------------- +// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ -/ _cat] A common use of this command is to verify the health is consistent across nodes: @@ -27,6 +49,7 @@ across nodes: [3] 20:20:52 [SUCCESS] es2.vm 1384309218 18:20:18 foo green 3 3 3 3 0 0 0 0 -------------------------------------------------- +// NOTCONSOLE A less obvious use is to track recovery of a large cluster over time. With enough shards, starting a cluster, or even recovering after @@ -42,6 +65,7 @@ to track its progress is by using this command in a delayed loop: 1384309806 18:30:06 foo green 3 3 1832 916 4 0 0 ^C -------------------------------------------------- +// NOTCONSOLE In this scenario, we can tell that recovery took roughly four minutes. If this were going on for hours, we would be able to watch the diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 22be4067f8a..fa43e7c696a 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -2,19 +2,35 @@ == cat indices The `indices` command provides a cross-section of each index. This -information *spans nodes*. +information *spans nodes*. 
For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/indices/twi*?v' -health status index pri rep docs.count docs.deleted store.size pri.store.size -green open twitter 5 1 11434 0 64mb 32mb -green open twitter2 2 0 2030 0 5.8mb 5.8mb +GET /_cat/indices/twi*?v&s=index -------------------------------------------------- +// CONSOLE +// TEST[setup:huge_twitter] +// TEST[s/^/POST _flush\n/] +// TEST[s/^/PUT twitter2\n{"settings": {"number_of_replicas": 0}}\n/] +// We flush very early here because the index's size is cached and we sort on +// size below. So to get a realistic sort on size we need to flush here or else +// the size is just whatever portion of the index is pushed out of memory +// during test setup which isn't deterministic. + +Might respond with: + +[source,txt] +-------------------------------------------------- +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +-------------------------------------------------- +// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] We can tell quickly how many shards make up an index, the number of -docs at the Lucene level, including hidden docs (e.g., from nested types), -deleted docs, primary store size, and total store size (all shards including replicas). +docs at the Lucene level, including hidden docs (e.g., from nested types), +deleted docs, primary store size, and total store size (all shards including replicas). All these exposed metrics come directly from Lucene APIs. [float] @@ -31,39 +47,77 @@ the view of relevant stats in the context of only the primaries. Which indices are yellow? -[source,sh] +[source,js] -------------------------------------------------- -% curl localhost:9200/_cat/indices?health=yellow -yellow open wiki 2 1 6401 1115 151.4mb 151.4mb -yellow open twitter 5 1 11434 0 32mb 32mb +GET /_cat/indices?v&health=yellow -------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which looks like: + +[source,txt] +-------------------------------------------------- +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb +-------------------------------------------------- +// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ _cat] What's my largest index by disk usage not including replicas? -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk8 -green open wiki 2 0 6401 1115 158843725 158843725 -green open twitter 5 1 11434 0 67155614 33577857 -green open twitter2 2 0 2030 0 6125085 6125085 +GET /_cat/indices?v&s=store.size:desc -------------------------------------------------- +// CONSOLE +// TEST[continued] -How many merge operations have the shards for the `wiki` completed? 
+Which looks like: -[source,sh] +[source,txt] -------------------------------------------------- -% curl 'localhost:9200/_cat/indices/wiki?pri&v&h=health,index,pri,rep,docs.count,mt' -health index docs.count mt pri.mt -green wiki 9646 16 16 +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b -------------------------------------------------- +// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] + +How many merge operations have the shards for the `twitter` completed? + +[source,js] +-------------------------------------------------- +GET /_cat/indices/twitter?pri&v&h=health,index,pri,rep,docs.count,mt +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Might look like: + +[source,js] +-------------------------------------------------- +health index pri rep docs.count mt pri.mt +yellow twitter 1 1 1200 16 16 +-------------------------------------------------- +// TESTRESPONSE[s/16/\\d+/ _cat] How much memory is used per index? -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/indices?v&h=i,tm' -i tm -wiki 8.1gb -test 30.5kb -user 1.9mb +GET /_cat/indices?v&h=i,tm&s=tm:desc -------------------------------------------------- +// CONSOLE +// TEST[continued] + +Might look like: + +[source,js] +-------------------------------------------------- +i tm +twitter 8.1gb +twitter2 30.5kb +-------------------------------------------------- +// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] +// TESTRESPONSE[_cat] diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index caed564d7b5..dfa10d6e3a4 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -2,14 +2,22 @@ == cat master `master` doesn't have any extra options. It simply displays the -master's node ID, bound IP address, and node name. +master's node ID, bound IP address, and node name. For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/master?v' -id ip node -Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA +GET /_cat/master?v -------------------------------------------------- +// CONSOLE + +might respond: + +[source,txt] +-------------------------------------------------- +id host ip node +YzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2 +-------------------------------------------------- +// TESTRESPONSE[s/YzWoH_2.+/.+/ _cat] This information is also available via the `nodes` command, but this is slightly shorter when all you want to do, for example, is verify @@ -25,3 +33,4 @@ Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA [3] 19:16:37 [SUCCESS] es1.vm Ntgn2DcuTjGuXlhKDUD4vA 192.168.56.30 H5dfFeA -------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 1677583a709..18feeba8d03 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -2,34 +2,26 @@ == cat nodeattrs The `nodeattrs` command shows custom node attributes. 
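Such attributes are defined in each node's configuration before startup; a minimal sketch, assuming the 5.x `node.attr.` namespace and a hypothetical `rack` attribute:

[source,yaml]
--------------------------------------------------
node.attr.rack: rack314
--------------------------------------------------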
+For example: -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodeattrs -node host ip attr value -DKDM97B epsilon 192.168.1.8 rack rack314 -DKDM97B epsilon 192.168.1.8 azone us-east-1 +GET /_cat/nodeattrs?v -------------------------------------------------- +// CONSOLE -The first few columns give you basic info per node. +Could look like: - -["source","sh",subs="attributes,callouts"] +[source,txt] -------------------------------------------------- -node host ip -DKDM97B epsilon 192.168.1.8 -DKDM97B epsilon 192.168.1.8 +node host ip attr value +EK_AsJb 127.0.0.1 127.0.0.1 testattr test -------------------------------------------------- +// TESTRESPONSE[s/EK_AsJb/.+/ _cat] - -The attr and value columns can give you a picture of custom node attributes. - -[source,sh] --------------------------------------------------- -attr value -rack rack314 -azone us-east-1 --------------------------------------------------- +The first few columns (`node`, `host`, `ip`) give you basic info per node +and the `attr` and `value` columns give you the custom node attributes, +one per line. [float] === Columns @@ -49,13 +41,20 @@ by default. To have the headers appear in the output, use verbose mode (`v`). The header name will match the supplied value (e.g., `pid` versus `p`). For example: -["source","sh",subs="attributes,callouts"] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodeattrs?v&h=name,pid,attr,value -name pid attr value -DKDM97B 28000 rack rack314 -DKDM97B 28000 azone us-east-1 +GET /_cat/nodeattrs?v&h=name,pid,attr,value -------------------------------------------------- +// CONSOLE + +Might look like: + +[source,js] +-------------------------------------------------- +name pid attr value +EK_AsJb 19566 testattr test +-------------------------------------------------- +// TESTRESPONSE[s/EK_AsJb/.+/ s/19566/\\d*/ _cat] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index b0b152d4c50..2d9312a7849 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -1,38 +1,31 @@ [[cat-nodes]] == cat nodes -The `nodes` command shows the cluster topology. +The `nodes` command shows the cluster topology. For example -[source,sh] +[source,js] -------------------------------------------------- -% GET /_cat/nodes -192.168.56.30 9 78 22 1.80 2.05 2.51 mdi * bGG90GE -192.168.56.10 6 75 14 1.24 2.45 1.37 md - I8hydUG -192.168.56.20 5 71 12 1.07 1.05 1.11 di - H5dfFeA +GET /_cat/nodes?v -------------------------------------------------- +// CONSOLE -The first few columns tell you where your nodes live and give -a picture of your heap, memory, cpu and load. 
+Might look like: -[source,sh] +[source,txt] -------------------------------------------------- -ip heap.percent ram.percent cpu load_1m load_5m load_15m -192.168.56.30 9 78 22 1.80 2.05 2.51 -192.168.56.10 6 75 14 1.24 2.45 1.37 -192.168.56.20 5 71 12 1.07 1.05 1.11 +ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name +127.0.0.1 65 99 42 3.07 mdi * mJw06l1 -------------------------------------------------- +// TESTRESPONSE[s/3.07/(\\d+\\.\\d+( \\d+\\.\\d+ (\\d+\\.\\d+)?)?)?/] +// TESTRESPONSE[s/65 99 42/\\d+ \\d+ \\d+/] +// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ _cat] -The last columns provide ancillary information that can often be -useful when looking at the cluster as a whole, particularly large -ones. How many master-eligible nodes do I have? +The first few columns (`ip`, `heap.percent`, `ram.percent`, `cpu`, `load_*`) tell +you where your nodes live and give a quick picture of performance stats. -[source,sh] --------------------------------------------------- -node.role master name -mdi * bGG90GE -md - I8hydUG -di - H5dfFeA --------------------------------------------------- +The last (`node.role`, `master`, and `name`) columns provide ancillary +information that can often be useful when looking at the cluster as a whole, +particularly large ones. How many master-eligible nodes do I have? [float] === Columns @@ -52,18 +45,20 @@ by default. To have the headers appear in the output, use verbose mode (`v`). The header name will match the supplied value (e.g., `pid` versus `p`). For example: -[source,sh] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/nodes?v&h=id,ip,port,v,m -id ip port v m -pLSN 192.168.56.30 9300 {version} - -k0zy 192.168.56.10 9300 {version} - -6Tyi 192.168.56.20 9300 {version} * -% curl 192.168.56.10:9200/_cat/nodes?h=id,ip,port,v,m -pLSN 192.168.56.30 9300 {version} - -k0zy 192.168.56.10 9300 {version} - -6Tyi 192.168.56.20 9300 {version} * +GET /_cat/nodes?v&h=id,ip,port,v,m -------------------------------------------------- +// CONSOLE + +Might look like: + +["source","js",subs="attributes,callouts"] +-------------------------------------------------- +id ip port v m +veJR 127.0.0.1 59938 {version} * +-------------------------------------------------- +// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ _cat] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index 5452052669c..d5216c1eb00 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -3,11 +3,18 @@ `pending_tasks` provides the same information as the <> API in a -convenient tabular format. +convenient tabular format. 
For example: -[source,sh] +[source,js] +-------------------------------------------------- +GET /_cat/pending_tasks?v +-------------------------------------------------- +// CONSOLE + +Might look like: + +[source,txt] -------------------------------------------------- -% curl 'localhost:9200/_cat/pending_tasks?v' insertOrder timeInQueue priority source 1685 855ms HIGH update-mapping [foo][t] 1686 843ms HIGH update-mapping [foo][t] @@ -17,3 +24,6 @@ insertOrder timeInQueue priority source 1690 787ms HIGH update-mapping [foo][t] 1691 773ms HIGH update-mapping [foo][t] -------------------------------------------------- +// TESTRESPONSE[s/(\n.+)+/(\\n.+)*/ _cat] +// We can't assert anything about the tasks in progress here because we don't +// know what might be in progress.... diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index a1d0dc88702..b4aa02a7af7 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -3,12 +3,37 @@ The `plugins` command provides a view per node of running plugins. This information *spans nodes*. -[source,sh] +[source,js] ------------------------------------------------------------------------------ -% curl 'localhost:9200/_cat/plugins?v' -name component version description -I8hydUG discovery-gce 5.0.0 The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism. -I8hydUG lang-javascript 5.0.0 The JavaScript language plugin allows to have javascript as the language of scripts to execute. +GET /_cat/plugins?v&s=component&h=name,component,version,description +------------------------------------------------------------------------------ +// CONSOLE + +Might look like: + +["source","txt",subs="attributes,callouts"] +------------------------------------------------------------------------------ +name component version description +U7321H6 analysis-icu {version} The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components. +U7321H6 analysis-kuromoji {version} The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch. +U7321H6 analysis-phonetic {version} The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch. +U7321H6 analysis-smartcn {version} Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch. +U7321H6 analysis-stempel {version} The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch. +U7321H6 analysis-ukrainian {version} The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch. +U7321H6 discovery-azure-classic {version} The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism +U7321H6 discovery-ec2 {version} The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism. +U7321H6 discovery-file {version} Discovery file plugin enables unicast discovery from hosts stored in a file. +U7321H6 discovery-gce {version} The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism. 
+U7321H6 ingest-attachment {version} Ingest processor that uses Apache Tika to extract contents +U7321H6 ingest-geoip {version} Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database +U7321H6 ingest-user-agent {version} Ingest processor that extracts information from a user agent +U7321H6 jvm-example {version} Demonstrates all the pluggable Java entry points in Elasticsearch +U7321H6 lang-javascript {version} The JavaScript language plugin allows to have javascript as the language of scripts to execute. +U7321H6 lang-python {version} The Python language plugin allows to have python as the language of scripts to execute. +U7321H6 mapper-murmur3 {version} The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index. +U7321H6 mapper-size {version} The Mapper Size plugin allows document to record their uncompressed size at index time. +U7321H6 store-smb {version} The Store SMB plugin adds support for SMB stores. ------------------------------------------------------------------------------- +// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ _cat] We can tell quickly how many plugins per node we have and which versions. diff --git a/docs/reference/cat/repositories.asciidoc b/docs/reference/cat/repositories.asciidoc index 5fb68a8929c..8caf3c5fd6f 100644 --- a/docs/reference/cat/repositories.asciidoc +++ b/docs/reference/cat/repositories.asciidoc @@ -1,14 +1,24 @@ [[cat-repositories]] == cat repositories -The `repositories` command shows the snapshot repositories registered in the cluster. +The `repositories` command shows the snapshot repositories registered in the +cluster. For example: -[source,sh] +[source,js] +-------------------------------------------------- +GET /_cat/repositories?v +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT \/_snapshot\/repo1\n{"type": "fs", "settings": {"location": "repo\/1"}}\n/] + +might look like: + +[source,txt] -------------------------------------------------- -% curl 'localhost:9200/_cat/repositories?v' id type repo1 fs repo2 s3 -------------------------------------------------- +// TESTRESPONSE[s/\nrepo2 s3// _cat] We can quickly see which repositories are registered and their type. diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 84456e3a313..d0b0e369538 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -3,24 +3,24 @@ The `segments` command provides low level information about the segments in the shards of an index. It provides information similar to the -link:indices-segments.html[_segments] endpoint. +link:indices-segments.html[_segments] endpoint. For example: -[source,sh] +[source,js] -------------------------------------------------- -% curl 'http://localhost:9200/_cat/segments?v' -index shard prirep ip segment generation docs.count [...] -test 4 p 192.168.2.105 _0 0 1 -test1 2 p 192.168.2.105 _0 0 1 -test1 3 p 192.168.2.105 _2 2 1 +GET /_cat/segments?v -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT \/test\/test\/1?refresh\n{"test":"test"}\nPUT \/test1\/test\/1?refresh\n{"test":"test"}\n/] -[source,sh] +might look like: + +["source","txt",subs="attributes,callouts"] -------------------------------------------------- -[...] 
docs.deleted size size.memory committed searchable version compound - 0 2.9kb 7818 false true 4.10.2 true - 0 2.9kb 7818 false true 4.10.2 true - 0 2.9kb 7818 false true 4.10.2 true +index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound +test 3 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test1 3 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -------------------------------------------------- +// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat] The output shows information about index names and shard numbers in the first two columns. diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index e49e2f6b8aa..215cf8b8d7a 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -74,7 +74,7 @@ curl -XGET 'http://localhost:9200/_nodes/stats/process' curl -XGET 'http://localhost:9200/_nodes/10.0.0.1/stats/process' -------------------------------------------------- -The `all` flag can be set to return all the stats. +All stats can be explicitly requested via `/_nodes/stats/_all` or `/_nodes/stats?metric=_all`. [float] [[fs-info]] @@ -217,6 +217,43 @@ the operating system: `os.swap.used_in_bytes`:: Amount of used swap space in bytes +`os.cgroup.cpuacct.control_group` (Linux only):: + The `cpuacct` control group to which the Elasticsearch process + belongs + +`os.cgroup.cpuacct.usage` (Linux only):: + The total CPU time (in nanoseconds) consumed by all tasks in the + same cgroup as the Elasticsearch process + +`os.cgroup.cpu.control_group` (Linux only):: + The `cpu` control group to which the Elasticsearch process belongs + +`os.cgroup.cpu.cfs_period_micros` (Linux only):: + The period of time (in microseconds) for how regularly all tasks in + the same cgroup as the Elasticsearch process should have their + access to CPU resources reallocated. + +`os.cgroup.cpu.cfs_quota_micros` (Linux only):: + The total amount of time (in microseconds) for which all tasks in + the same cgroup as the Elasticsearch process can run during one + period (as specified by `os.cgroup.cpu.cfs_period_micros`) + +`os.cgroup.cpu.stat.number_of_elapsed_periods` (Linux only):: + The number of reporting periods (as specified by + `os.cgroup.cpu.cfs_period_micros`) that have elapsed + +`os.cgroup.cpu.stat.number_of_times_throttled` (Linux only):: + The number of times all tasks in the same cgroup as the + Elasticsearch process have been throttled. + +`os.cgroup.cpu.stat.time_throttled_nanos` (Linux only):: + The total amount of time (in nanoseconds) for which all tasks in + the same cgroup as the Elasticsearch process have been throttled. + +NOTE: For the cgroup stats to be visible, cgroups must be compiled into +the kernel, the `cpu` and `cpuacct` cgroup subsystems must be +configured and stats must be readable from `/sys/fs/cgroup/cpu` +and `/sys/fs/cgroup/cpuacct`.
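+ +To make the shape of these fields concrete, here is a sketch of what the +`os.cgroup` section of a stats response might look like on Linux. The field +names come from the list above; the nesting and all values are invented for +illustration: + +[source,js] +-------------------------------------------------- +"os" : { + "cgroup" : { + "cpuacct" : { + "control_group" : "/", + "usage" : 364869866063112 + }, + "cpu" : { + "control_group" : "/", + "cfs_period_micros" : 100000, + "cfs_quota_micros" : -1, + "stat" : { + "number_of_elapsed_periods" : 0, + "number_of_times_throttled" : 0, + "time_throttled_nanos" : 0 + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE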
[float] [[process-stats]] diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index dec4fb8f69f..ce550a689bf 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -62,7 +62,7 @@ It is also possible to retrieve information for a particular task: [source,js] -------------------------------------------------- -GET _tasks/taskId:1 <1> +GET _tasks/task_id:1 <1> -------------------------------------------------- // CONSOLE // TEST[catch:missing] @@ -108,13 +108,14 @@ GET _cat/tasks // CONSOLE [float] +[[task-cancellation]] === Task Cancellation If a long-running task supports cancellation, it can be cancelled by the following command: [source,js] -------------------------------------------------- -POST _tasks/taskId:1/_cancel +POST _tasks/task_id:1/_cancel -------------------------------------------------- // CONSOLE @@ -124,7 +125,7 @@ nodes `nodeId1` and `nodeId2`. [source,js] -------------------------------------------------- -POST _tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex +POST _tasks/_cancel?nodes=nodeId1,nodeId2&actions=*reindex -------------------------------------------------- // CONSOLE diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 468f4545627..3c5fad65788 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -233,7 +233,7 @@ You can fetch the status of any running delete-by-query requests with the [source,js] -------------------------------------------------- -GET _tasks?detailed=true&action=*/delete/byquery +GET _tasks?detailed=true&actions=*/delete/byquery -------------------------------------------------- // CONSOLE @@ -308,7 +308,7 @@ Any Delete By Query can be canceled using the <>: [source,js] -------------------------------------------------- -POST _tasks/taskid:1/_cancel +POST _tasks/task_id:1/_cancel -------------------------------------------------- // CONSOLE @@ -327,7 +327,7 @@ using the `_rethrottle` API: [source,js] -------------------------------------------------- -POST _delete_by_query/taskid:1/_rethrottle?requests_per_second=-1 +POST _delete_by_query/task_id:1/_rethrottle?requests_per_second=-1 -------------------------------------------------- // CONSOLE @@ -341,6 +341,7 @@ take effect on after completing the current batch. This prevents scroll timeouts. 
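+ +For instance, reusing the placeholder task id from the example above, a +request like this is a sketch of throttling the running delete-by-query down +to 500 requests per second instead of disabling throttling entirely: + +[source,js] +-------------------------------------------------- +POST _delete_by_query/task_id:1/_rethrottle?requests_per_second=500 +-------------------------------------------------- +// CONSOLE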
[float] +[[docs-delete-by-query-manual-slice]] === Manually slicing Delete-by-query supports <> allowing you to manually parallelize @@ -410,3 +411,97 @@ Which results in a sensible `total` like this one: } ---------------------------------------------------------------- // TESTRESPONSE + +[float] +[[docs-delete-by-query-automatic-slice]] +=== Automatic slicing + +You can also let delete-by-query automatically parallelize using +<> to slice on `_uid`: + +[source,js] +---------------------------------------------------------------- +POST twitter/_delete_by_query?refresh&slices=5 +{ + "query": { + "range": { + "likes": { + "lt": 10 + } + } + } +} +---------------------------------------------------------------- +// CONSOLE +// TEST[setup:big_twitter] + +Which you also can verify works with: + +[source,js] +---------------------------------------------------------------- +POST twitter/_search?size=0&filter_path=hits.total +{ + "query": { + "range": { + "likes": { + "lt": 10 + } + } + } +} +---------------------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which results in a sensible `total` like this one: + +[source,js] +---------------------------------------------------------------- +{ + "hits": { + "total": 0 + } +} +---------------------------------------------------------------- +// TESTRESPONSE + +Adding `slices` to `_delete_by_query` just automates the manual process used in +the section above, creating sub-requests, which means it has some quirks: +* You can see these requests in the +<>. These sub-requests are "child" +tasks of the task for the request with `slices`. +* Fetching the status of the task for the request with `slices` only contains +the status of completed slices. +* These sub-requests are individually addressable for things like cancellation +and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished +sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices` each sub-request won't get a perfectly even +portion of the documents. All documents will be addressed, but some slices may +be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `size` on a request with `slices` +are distributed proportionally to each sub-request. Combine that with the point +above about distribution being uneven and you should conclude that using +`size` with `slices` might not result in exactly `size` documents being +`_delete_by_query`ed. +* Each sub-request gets a slightly different snapshot of the source index +though these are all taken at approximately the same time. + +[float] +[[docs-delete-by-query-picking-slices]] +=== Picking the number of slices + +At this point we have a few recommendations around the number of `slices` to +use (the `max` parameter in the slice API if manually parallelizing): + +* Don't use large numbers. `500` creates fairly massive CPU thrash. +* It is more efficient from a query performance standpoint to use some multiple +of the number of shards in the source index (a sketch for checking the shard +count follows this list). +* Using exactly as many shards as are in the source index is the most efficient +from a query performance standpoint. +* Indexing performance should scale linearly across available resources with +the number of `slices`. +* Whether indexing or query performance dominates that process depends on lots +of factors like the documents being deleted and the cluster doing the +deleting.
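+ +Since several of these recommendations key off the number of shards in the +source index, it can help to look that number up first. A minimal sketch using +the index settings API (the `twitter` index matches the examples above; the +`filter_path` parameter is just optional response filtering): + +[source,js] +---------------------------------------------------------------- +GET /twitter/_settings?filter_path=*.settings.index.number_of_shards +---------------------------------------------------------------- +// CONSOLE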
diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index a130e66a191..dd5c1de1485 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -26,7 +26,7 @@ The result of the above delete operation is: "_type" : "tweet", "_id" : "1", "_version" : 2, - "result: deleted" + "result": "deleted" } -------------------------------------------------- diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index bd7c09319e2..8a67f8d57d8 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -157,12 +157,10 @@ than the version of the stored document. If there is no existing document the operation will succeed as well. The given version will be used as the new version and will be stored with the new document. The supplied version must be a non-negative long number. -`force`:: the document will be indexed regardless of the version of the stored document or if there -is no existing document. The given version will be used as the new version and will be stored -with the new document. This version type is typically used for correcting errors. - -*NOTE*: The `external_gte` & `force` version types are meant for special use cases and should be used -with care. If used incorrectly, they can result in loss of data. +*NOTE*: The `external_gte` version type is meant for special use cases and +should be used with care. If used incorrectly, it can result in loss of data. +There is another option, `force`, which is deprecated because it can cause +primary and replica shards to diverge. [float] [[operation-type]] @@ -331,7 +329,7 @@ of configured copies per shard in the index (which is `number_of_replicas+1`). Specifying a negative value or a number greater than the number of shard copies will throw an error. -For example, suppose we have a cluster of three nodes, `A, `B`, and `C` and +For example, suppose we have a cluster of three nodes, `A`, `B`, and `C` and we create an index `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If we attempt an indexing operation, by default the operation will only ensure diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 46786c0b6df..6279c3cae0f 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -3,6 +3,11 @@ experimental[The reindex API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] +IMPORTANT: Reindex does not attempt to set up the destination index. It does +not copy the settings of the source index. You should set up the destination +index prior to running a `_reindex` action, including setting up mappings, shard +counts, replicas, etc. + The most basic form of `_reindex` just copies documents from one index to another. This will copy documents from the `twitter` index into the `new_twitter` index: @@ -362,6 +367,7 @@ POST _reindex // TEST[s/^/PUT source\n/] [float] +[[reindex-from-remote]] === Reindex from Remote Reindex supports reindexing from a remote Elasticsearch cluster: @@ -404,8 +410,8 @@ basic auth or the password will be sent in plain text. Remote hosts have to be explicitly whitelisted in elasticsearch.yaml using the `reindex.remote.whitelist` property. It can be set to a comma delimited list of allowed remote `host` and `port` combinations (e.g. -`otherhost:9200, another:9200`). 
Scheme is ignored by the whitelist - only host -and port are used. +`otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*`). Scheme is +ignored by the whitelist - only host and port are used. This feature should work with remote clusters of any version of Elasticsearch you are likely to find. This should allow you to upgrade from any version of @@ -415,6 +421,42 @@ version. To enable queries sent to older versions of Elasticsearch the `query` parameter is sent directly to the remote host without validation or modification. +Reindexing from a remote server uses an on-heap buffer that defaults to a +maximum size of 200mb. If the remote index includes very large documents you'll +need to use a smaller batch size. The example below sets the batch size to `10`, +which is very, very small. + +[source,js] +-------------------------------------------------- +POST _reindex +{ + "source": { + "remote": { + "host": "http://otherhost:9200", + "username": "user", + "password": "pass" + }, + "index": "source", + "size": 10, + "query": { + "match": { + "test": "data" + } + } + }, + "dest": { + "index": "dest" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:host] +// TEST[s/^/PUT source\n/] +// TEST[s/otherhost:9200",/\${host}"/] +// TEST[s/"username": "user",//] +// TEST[s/"password": "pass"//] + + [float] === URL Parameters @@ -595,7 +637,7 @@ Any Reindex can be canceled using the <>: [source,js] -------------------------------------------------- -POST _tasks/taskid:1/_cancel +POST _tasks/task_id:1/_cancel -------------------------------------------------- // CONSOLE @@ -614,7 +656,7 @@ the `_rethrottle` API: [source,js] -------------------------------------------------- -POST _reindex/taskid:1/_rethrottle?requests_per_second=-1 +POST _reindex/task_id:1/_rethrottle?requests_per_second=-1 -------------------------------------------------- // CONSOLE @@ -695,9 +737,9 @@ and it'll look like: Or you can search by `tag` or whatever you want.
[float] -=== Manually slicing - -Reindex supports <> allowing you to manually parallelize the +[[docs-reindex-manual-slice]] +=== Manual slicing +Reindex supports <>, allowing you to manually parallelize the process relatively easily: [source,js] @@ -754,6 +796,89 @@ Which results in a sensible `total` like this one: ---------------------------------------------------------------- // TESTRESPONSE +[float] +[[docs-reindex-automatic-slice]] +=== Automatic slicing + +You can also let reindex automatically parallelize using <> to +slice on `_uid`: + +[source,js] +---------------------------------------------------------------- +POST _reindex?slices=5&refresh +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter" + } +} +---------------------------------------------------------------- +// CONSOLE +// TEST[setup:big_twitter] + +Which you also can verify works with: + +[source,js] +---------------------------------------------------------------- +POST new_twitter/_search?size=0&filter_path=hits.total +---------------------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which results in a sensible `total` like this one: + +[source,js] +---------------------------------------------------------------- +{ + "hits": { + "total": 120 + } +} +---------------------------------------------------------------- +// TESTRESPONSE + +Adding `slices` to `_reindex` just automates the manual process used in the +section above, creating sub-requests, which means it has some quirks: +* You can see these requests in the <>. These +sub-requests are "child" tasks of the task for the request with `slices` (see +the task-list sketch at the end of this section). +* Fetching the status of the task for the request with `slices` only contains +the status of completed slices. +* These sub-requests are individually addressable for things like cancellation +and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished +sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices` each sub-request won't get a perfectly even +portion of the documents. All documents will be addressed, but some slices may +be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `size` on a request with `slices` +are distributed proportionally to each sub-request. Combine that with the point +above about distribution being uneven and you should conclude that using +`size` with `slices` might not result in exactly `size` documents being +`_reindex`ed. +* Each sub-request gets a slightly different snapshot of the source index +though these are all taken at approximately the same time. + +[float] +[[docs-reindex-picking-slices]] +=== Picking the number of slices + +At this point we have a few recommendations around the number of `slices` to +use (the `max` parameter in the slice API if manually parallelizing): + +* Don't use large numbers. `500` creates fairly massive CPU thrash. +* It is more efficient from a query performance standpoint to use some multiple +of the number of shards in the source index. +* Using exactly as many shards as are in the source index is the most efficient +from a query performance standpoint. +* Indexing performance should scale linearly across available resources with +the number of `slices`. +* Whether indexing or query performance dominates that process depends on lots +of factors like the documents being reindexed and the cluster doing the +reindexing.
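+ +As noted in the quirks above, the sub-requests created by `slices` show up in +the task list. A sketch of how you might watch them while a sliced reindex is +running, combining the `detailed=true` task listing and the `actions=*reindex` +filter that both appear elsewhere in this document: + +[source,js] +---------------------------------------------------------------- +GET _tasks?detailed=true&actions=*reindex +---------------------------------------------------------------- +// CONSOLE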
+ [float] === Reindex daily indices diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 7299d398e60..69c22921225 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -295,7 +295,7 @@ You can fetch the status of all running update-by-query requests with the [source,js] -------------------------------------------------- -GET _tasks?detailed=true&action=*byquery +GET _tasks?detailed=true&actions=*byquery -------------------------------------------------- // CONSOLE @@ -373,7 +373,7 @@ Any Update By Query can be canceled using the <>: [source,js] -------------------------------------------------- -POST _tasks/taskid:1/_cancel +POST _tasks/task_id:1/_cancel -------------------------------------------------- // CONSOLE @@ -392,7 +392,7 @@ using the `_rethrottle` API: [source,js] -------------------------------------------------- -POST _update_by_query/taskid:1/_rethrottle?requests_per_second=-1 +POST _update_by_query/task_id:1/_rethrottle?requests_per_second=-1 -------------------------------------------------- // CONSOLE @@ -406,8 +406,8 @@ take effect on after completing the current batch. This prevents scroll timeouts. [float] -=== Manually slicing - +[[docs-update-by-query-manual-slice]] +=== Manual slicing Update-by-query supports <> allowing you to manually parallelize the process relatively easily: @@ -459,6 +459,88 @@ Which results in a sensible `total` like this one: ---------------------------------------------------------------- // TESTRESPONSE +[float] +[[docs-update-by-query-automatic-slice]] +=== Automatic slicing + +You can also let update-by-query automatically parallelize using +<> to slice on `_uid`: + +[source,js] +---------------------------------------------------------------- +POST twitter/_update_by_query?refresh&slices=5 +{ + "script": { + "inline": "ctx._source['extra'] = 'test'" + } +} +---------------------------------------------------------------- +// CONSOLE +// TEST[setup:big_twitter] + +Which you also can verify works with: + +[source,js] +---------------------------------------------------------------- +POST twitter/_search?size=0&q=extra:test&filter_path=hits.total +---------------------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which results in a sensible `total` like this one: + +[source,js] +---------------------------------------------------------------- +{ + "hits": { + "total": 120 + } +} +---------------------------------------------------------------- +// TESTRESPONSE + +Adding `slices` to `_update_by_query` just automates the manual process used in +the section above, creating sub-requests, which means it has some quirks: +* You can see these requests in the +<>. These sub-requests are "child" +tasks of the task for the request with `slices`. +* Fetching the status of the task for the request with `slices` only contains +the status of completed slices. +* These sub-requests are individually addressable for things like cancellation +and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished +sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices` each sub-request won't get a perfectly even +portion of the documents. All documents will be addressed, but some slices may +be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `size` on a request with `slices` +are distributed proportionally to each sub-request. Combine that with the point +above about distribution being uneven and you should conclude that using +`size` with `slices` might not result in exactly `size` documents being +`_update_by_query`ed. +* Each sub-request gets a slightly different snapshot of the source index +though these are all taken at approximately the same time. + +[float] +[[docs-update-by-query-picking-slices]] +=== Picking the number of slices + +At this point we have a few recommendations around the number of `slices` to +use (the `max` parameter in the slice API if manually parallelizing): + +* Don't use large numbers. `500` creates fairly massive CPU thrash. +* It is more efficient from a query performance standpoint to use some multiple +of the number of shards in the source index. +* Using exactly as many shards as are in the source index is the most efficient +from a query performance standpoint. +* Indexing performance should scale linearly across available resources with +the number of `slices`. +* Whether indexing or query performance dominates that process depends on lots +of factors like the documents being reindexed and the cluster doing the +reindexing. + + [float] [[picking-up-a-new-property]] === Pick up a new property diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index b31e2e131a3..2d478303ca9 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -67,7 +67,7 @@ POST test/type1/1/_update In addition to `_source`, the following variables are available through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, -`_parent`. +`_parent`, and `_now` (the current timestamp). We can also add a new field to the document: diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b8b48b21bd9..5e7830ce90c 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -118,6 +118,7 @@ Let's download the Elasticsearch {version} tar as follows (Windows users should -------------------------------------------------- curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz -------------------------------------------------- +// NOTCONSOLE Then extract it as follows (Windows users should unzip the zip package): @@ -144,7 +145,6 @@ If everything goes well, you should see a bunch of messages that look like below ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -./elasticsearch [2016-09-16T14:17:51,251][INFO ][o.e.n.Node ] [] initializing ... [2016-09-16T14:17:51,329][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [317.7gb], net total_space [453.6gb], spins? [no], types [ext4] [2016-09-16T14:17:51,330][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] heap size [1.9gb], compressed ordinary object pointers [true] @@ -198,42 +198,49 @@ Now that we have our node (and cluster) up and running, the next step is to unde Let's start with a basic health check, which we can use to see how our cluster is doing. We'll be using curl to do this but you can use any tool that allows you to make HTTP/REST calls. Let's assume that we are still on the same node where we started Elasticsearch on and open another command shell window. -To check the cluster health, we will be using the <>.
Remember previously that our node HTTP endpoint is available at port `9200`: +To check the cluster health, we will be using the <>. You can +run the command below in https://www.elastic.co/guide/en/kibana/{branch}/console-kibana.html[Kibana's Console] +by clicking "VIEW IN CONSOLE" or with `curl` by clicking the "COPY AS CURL" +link below and pasting it into a terminal. -[source,sh] +[source,js] -------------------------------------------------- -curl 'localhost:9200/_cat/health?v' +GET /_cat/health?v -------------------------------------------------- +// CONSOLE And the response: -[source,sh] +[source,txt] -------------------------------------------------- -epoch timestamp cluster status node.total node.data shards pri relo init unassign -1394735289 14:28:09 elasticsearch green 1 1 0 0 0 0 0 +epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent +1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0% -------------------------------------------------- +// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTest/ _cat] We can see that our cluster named "elasticsearch" is up with a green status. Whenever we ask for the cluster health, we either get green, yellow, or red. Green means everything is good (cluster is fully functional), yellow means all data is available but some replicas are not yet allocated (cluster is fully functional), and red means some data is not available for whatever reason. Note that even if a cluster is red, it still is partially functional (i.e. it will continue to serve search requests from the available shards) but you will likely need to fix it ASAP since you have missing data. -Also from the above response, we can see and total of 1 node and that we have 0 shards since we have no data in it yet. Note that since we are using the default cluster name (elasticsearch) and since Elasticsearch uses unicast network discovery by default to find other nodes on the same machine, it is possible that you could accidentally start up more than one node on your computer and have them all join a single cluster. In this scenario, you may see more than 1 node in the above response. +Also from the above response, we can see a total of 1 node and that we have 0 shards since we have no data in it yet. Note that since we are using the default cluster name (elasticsearch) and since Elasticsearch uses unicast network discovery by default to find other nodes on the same machine, it is possible that you could accidentally start up more than one node on your computer and have them all join a single cluster. In this scenario, you may see more than 1 node in the above response. We can also get a list of nodes in our cluster as follows: -[source,sh] +[source,js] -------------------------------------------------- -curl 'localhost:9200/_cat/nodes?v' +GET /_cat/nodes?v -------------------------------------------------- +// CONSOLE And the response: -[source,sh] +[source,txt] -------------------------------------------------- -curl 'localhost:9200/_cat/nodes?v' -host ip heap.percent ram.percent load node.role master name -mwubuntu1 127.0.1.1 8 4 0.00 d * I8hydUG +ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name +127.0.0.1 10 5 5 4.46 mdi * PB2SGZY -------------------------------------------------- +// TESTRESPONSE[s/10 5 5 4.46/\\d+ \\d+ \\d+ (\\d+\\.\\d+)? (\\d+\\.\\d+)?
(\\d+\.\\d+)?/] +// TESTRESPONSE[s/[*]/[*]/ s/PB2SGZY/.+/ _cat] Here, we can see our one node named "I8hydUG", which is the single node that is currently in our cluster. @@ -241,18 +248,19 @@ Here, we can see our one node named "I8hydUG", which is the single node that is Now let's take a peek at our indices: -[source,sh] +[source,js] -------------------------------------------------- -curl 'localhost:9200/_cat/indices?v' +GET /_cat/indices?v -------------------------------------------------- +// CONSOLE And the response: -[source,sh] +[source,txt] -------------------------------------------------- -curl 'localhost:9200/_cat/indices?v' -health index pri rep docs.count docs.deleted store.size pri.store.size +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -------------------------------------------------- +// TESTRESPONSE[_cat] Which simply means we have no indices yet in the cluster. @@ -260,28 +268,23 @@ Which simply means we have no indices yet in the cluster. Now let's create an index named "customer" and then list all the indexes again: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer?pretty' -curl 'localhost:9200/_cat/indices?v' +PUT /customer?pretty +GET /_cat/indices?v -------------------------------------------------- +// CONSOLE The first command creates the index named "customer" using the PUT verb. We simply append `pretty` to the end of the call to tell it to pretty-print the JSON response (if any). And the response: -[source,sh] +[source,txt] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer?pretty' -{ - "acknowledged" : true, - "shards_acknowledged": true -} - -curl 'localhost:9200/_cat/indices?v' -health index pri rep docs.count docs.deleted store.size pri.store.size -yellow customer 5 1 0 0 495b 495b +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +yellow open customer 95SQ4TSUT7mWBT7VNHH67A 5 1 0 0 260b 260b -------------------------------------------------- +// TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+b/ _cat] The results of the second command tells us that we now have 1 index named customer and it has 5 primary shards and 1 replica (the defaults) and it contains 0 documents in it. @@ -293,24 +296,19 @@ Let's now put something into our customer index. Remember previously that in ord Let's index a simple customer document into the customer index, "external" type, with an ID of 1 as follows: -Our JSON document: { "name": "John Doe" } - -[source,sh] +[source,js] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer/external/1?pretty' -d ' +PUT /customer/external/1?pretty { "name": "John Doe" -}' +} -------------------------------------------------- +// CONSOLE And the response: [source,sh] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer/external/1?pretty' -d ' -{ - "name": "John Doe" -}' { "_index" : "customer", "_type" : "external", @@ -325,6 +323,7 @@ curl -XPUT 'localhost:9200/customer/external/1?pretty' -d ' "created" : true } -------------------------------------------------- +// TESTRESPONSE From the above, we can see that a new customer document was successfully created inside the customer index and the external type. The document also has an internal id of 1 which we specified at index time. 
@@ -332,16 +331,17 @@ It is important to note that Elasticsearch does not require you to explicitly cr Let's now retrieve that document that we just indexed: -[source,sh] +[source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/customer/external/1?pretty' +GET /customer/external/1?pretty -------------------------------------------------- +// CONSOLE +// TEST[continued] And the response: -[source,sh] +[source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/customer/external/1?pretty' { "_index" : "customer", "_type" : "external", @@ -351,6 +351,7 @@ curl -XGET 'localhost:9200/customer/external/1?pretty' "_source" : { "name": "John Doe" } } -------------------------------------------------- +// TESTRESPONSE Nothing out of the ordinary here other than a field, `found`, stating that we found a document with the requested ID 1 and another field, `_source`, which returns the full JSON document that we indexed from the previous step. @@ -358,45 +359,45 @@ Nothing out of the ordinary here other than a field, `found`, stating that we fo Now let's delete the index that we just created and then list all the indexes again: -[source,sh] +[source,js] -------------------------------------------------- -curl -XDELETE 'localhost:9200/customer?pretty' -curl 'localhost:9200/_cat/indices?v' +DELETE /customer?pretty +GET /_cat/indices?v -------------------------------------------------- +// CONSOLE +// TEST[continued] And the response: -[source,sh] +[source,txt] -------------------------------------------------- -curl -XDELETE 'localhost:9200/customer?pretty' -{ - "acknowledged" : true -} -curl 'localhost:9200/_cat/indices?v' -health index pri rep docs.count docs.deleted store.size pri.store.size +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -------------------------------------------------- +// TESTRESPONSE[_cat] Which means that the index was deleted successfully and we are now back to where we started with nothing in our cluster. Before we move on, let's take a closer look again at some of the API commands that we have learned so far: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer' -curl -XPUT 'localhost:9200/customer/external/1' -d ' +PUT /customer +PUT /customer/external/1 { "name": "John Doe" -}' -curl 'localhost:9200/customer/external/1' -curl -XDELETE 'localhost:9200/customer' +} +GET /customer/external/1 +DELETE /customer -------------------------------------------------- +// CONSOLE If we study the above commands carefully, we can actually see a pattern of how we access data in Elasticsearch. That pattern can be summarized as follows: -[source,sh] +[source,js] -------------------------------------------------- -curl -X<REST Verb> <Node>:<Port>/<Index>/<Type>/<ID> +<REST Verb> /<Index>/<Type>/<ID> -------------------------------------------------- +// NOTCONSOLE This REST access pattern is pervasive throughout all the API commands that if you can simply remember it, you will have a good head start at mastering Elasticsearch. @@ -409,33 +410,38 @@ Elasticsearch provides data manipulation and search capabilities in near real ti We've previously seen how we can index a single document.
Let's recall that command again: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer/external/1?pretty' -d ' +PUT /customer/external/1?pretty { "name": "John Doe" -}' +} -------------------------------------------------- +// CONSOLE Again, the above will index the specified document into the customer index, external type, with the ID of 1. If we then executed the above command again with a different (or same) document, Elasticsearch will replace (i.e. reindex) a new document on top of the existing one with the ID of 1: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer/external/1?pretty' -d ' +PUT /customer/external/1?pretty { "name": "Jane Doe" -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] The above changes the name of the document with the ID of 1 from "John Doe" to "Jane Doe". If, on the other hand, we use a different ID, a new document will be indexed and the existing document(s) already in the index remains untouched. -[source,sh] +[source,js] -------------------------------------------------- -curl -XPUT 'localhost:9200/customer/external/2?pretty' -d ' +PUT /customer/external/2?pretty { "name": "Jane Doe" -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] The above indexes a new document with an ID of 2. @@ -443,15 +449,17 @@ When indexing, the ID part is optional. If not specified, Elasticsearch will gen This example shows how to index a document without an explicit ID: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/customer/external?pretty' -d ' +POST /customer/external?pretty { "name": "Jane Doe" -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] -Note that in the above case, we are using the POST verb instead of PUT since we didn't specify an ID. +Note that in the above case, we are using the `POST` verb instead of PUT since we didn't specify an ID. === Updating Documents @@ -459,33 +467,39 @@ In addition to being able to index and replace documents, we can also update doc This example shows how to update our previous document (ID of 1) by changing the name field to "Jane Doe": -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/customer/external/1/_update?pretty' -d ' +POST /customer/external/1/_update?pretty { "doc": { "name": "Jane Doe" } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] This example shows how to update our previous document (ID of 1) by changing the name field to "Jane Doe" and at the same time add an age field to it: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/customer/external/1/_update?pretty' -d ' +POST /customer/external/1/_update?pretty { "doc": { "name": "Jane Doe", "age": 20 } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] -Updates can also be performed by using simple scripts. Note that dynamic scripts like the following are disabled by default as of `1.4.3`, have a look at the <> for more details. This example uses a script to increment the age by 5: +Updates can also be performed by using simple scripts. 
This example uses a script to increment the age by 5: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/customer/external/1/_update?pretty' -d ' +POST /customer/external/1/_update?pretty { "script" : "ctx._source.age += 5" -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] In the above example, `ctx._source` refers to the current source document that is about to be updated. @@ -495,12 +509,16 @@ Note that as of this writing, updates can only be performed on a single document Deleting a document is fairly straightforward. This example shows how to delete our previous customer with the ID of 2: -[source,sh] +[source,js] -------------------------------------------------- -curl -XDELETE 'localhost:9200/customer/external/2?pretty' +DELETE /customer/external/2?pretty -------------------------------------------------- +// CONSOLE +// TEST[continued] -The `delete-by-query` plugin can delete all documents matching a specific query. +See the <> to delete all documents matching a specific query. +It is worth noting that it is much more efficient to delete a whole index +than to delete all documents with the Delete By Query API. === Batch Processing @@ -508,26 +526,27 @@ In addition to being able to index, update, and delete individual documents, Ela As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/customer/external/_bulk?pretty' -d ' +POST /customer/external/_bulk?pretty {"index":{"_id":"1"}} {"name": "John Doe" } {"index":{"_id":"2"}} {"name": "Jane Doe" } -' -------------------------------------------------- +// CONSOLE This example updates the first document (ID of 1) and then deletes the second document (ID of 2) in one bulk operation: [source,sh] -------------------------------------------------- -curl -XPOST 'localhost:9200/customer/external/_bulk?pretty' -d ' +POST /customer/external/_bulk?pretty {"update":{"_id":"1"}} {"doc": { "name": "John Doe becomes Jane Doe" } } {"delete":{"_id":"2"}} -' -------------------------------------------------- +// CONSOLE +// TEST[continued] Note above that for the delete action, there is no corresponding source document after it since deletes only require the ID of the document to be deleted. @@ -540,7 +559,7 @@ The bulk API executes all the actions sequentially and in order. If a single act Now that we've gotten a glimpse of the basics, let's try to work on a more realistic dataset. I've prepared a sample of fictitious JSON documents of customer bank account information. Each document has the following schema: -[source,sh] +[source,js] -------------------------------------------------- { "account_number": 0, "balance": 16623, "firstname": "Bradshaw", "lastname": "Mckenzie", "age": 29, "gender": "F", "address": "244 Columbus Place", "employer": "Euron", "email": "bradshawmckenzie@euron.com", "city": "Hobucken", "state": "CO" } -------------------------------------------------- +// NOTCONSOLE For the curious, I generated this data from http://www.json-generator.com/[`www.json-generator.com/`] so please ignore the actual values and semantics of the data as these are all randomly generated. [float] === Loading the Sample Dataset -You can download the sample dataset (accounts.json) from https://github.com/bly2k/files/blob/master/accounts.zip?raw=true[here].
Extract it to our current directory and let's load it into our cluster as follows: +You can download the sample dataset (accounts.json) from https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[here]. Extract it to our current directory and let's load it into our cluster as follows: [source,sh] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/account/_bulk?pretty' --data-binary "@accounts.json" +curl -XPOST 'localhost:9200/bank/account/_bulk?pretty&refresh' --data-binary "@accounts.json" curl 'localhost:9200/_cat/indices?v' -------------------------------------------------- +// NOTCONSOLE + +//// +This replicates the above in a document-testing friendly way but isn't visible +in the docs: + +[source,js] +-------------------------------------------------- +GET /_cat/indices?v +-------------------------------------------------- +// CONSOLE +// TEST[setup:bank] +//// And the response: -[source,sh] +[source,js] -------------------------------------------------- -curl 'localhost:9200/_cat/indices?v' -health index pri rep docs.count docs.deleted store.size pri.store.size -yellow bank 5 1 1000 0 424.4kb 424.4kb +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 128.6kb 128.6kb -------------------------------------------------- +// TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/] +// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ _cat] Which means that we just successfully bulk indexed 1000 documents into the bank index (under the account type). @@ -587,18 +621,19 @@ Now let's start with some simple searches. There are two basic ways to run searc The REST API for search is accessible from the `_search` endpoint. This example returns all documents in the bank index: -[source,sh] +[source,js] -------------------------------------------------- -curl 'localhost:9200/bank/_search?q=*&pretty' +GET /bank/_search?q=*&sort=account_number:asc -------------------------------------------------- +// CONSOLE +// TEST[continued] Let's first dissect the search call. We are searching (`_search` endpoint) in the bank index, and the `q=*` parameter instructs Elasticsearch to match all documents in the index. The `pretty` parameter, again, just tells Elasticsearch to return pretty-printed JSON results. 
And the response (partially shown): -[source,sh] +[source,js] -------------------------------------------------- -curl 'localhost:9200/bank/_search?q=*&pretty' { "took" : 63, "timed_out" : false, @@ -609,21 +644,28 @@ curl 'localhost:9200/bank/_search?q=*&pretty' }, "hits" : { "total" : 1000, - "max_score" : 1.0, + "max_score" : null, "hits" : [ { + "_index" : "bank", + "_type" : "account", + "_id" : "0", + "sort": [0], + "_score" : null, + "_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} + }, { "_index" : "bank", "_type" : "account", "_id" : "1", - "_score" : 1.0, "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} - }, { - "_index" : "bank", - "_type" : "account", - "_id" : "6", - "_score" : 1.0, "_source" : {"account_number":6,"balance":5686,"firstname":"Hattie","lastname":"Bond","age":36,"gender":"M","address":"671 Bristol Street","employer":"Netagy","email":"hattiebond@netagy.com","city":"Dante","state":"TN"} - }, { - "_index" : "bank", - "_type" : "account", + "sort": [1], + "_score" : null, + "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} + }, ... + ] + } +} -------------------------------------------------- +// TESTRESPONSE[s/"took" : 63/"took" : $body.took/] +// TESTRESPONSE[s/\.\.\./$body.hits.hits.2, $body.hits.hits.3, $body.hits.hits.4, $body.hits.hits.5, $body.hits.hits.6, $body.hits.hits.7, $body.hits.hits.8, $body.hits.hits.9/] As for the response, we see the following parts: @@ -633,30 +675,34 @@ As for the response, we see the following parts: * `hits` – search results * `hits.total` – total number of documents matching our search criteria * `hits.hits` – actual array of search results (defaults to first 10 documents) +* `sort` - sort key for results (missing if sorting by score) * `_score` and `max_score` - ignore these fields for now Here is the same exact search above using the alternative request body method: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { - "query": { "match_all": {} } -}' + "query": { "match_all": {} }, + "sort": [ + { "account_number": "asc" } + ] +} -------------------------------------------------- +// CONSOLE +// TEST[continued] The difference here is that instead of passing `q=*` in the URI, we POST a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. 
-And the response (partially shown): +//// +Hidden response just so we can assert that it is indeed the same but don't have +to clutter the docs with it: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' { - "query": { "match_all": {} } -}' -{ - "took" : 26, + "took" : 63, "timed_out" : false, "_shards" : { "total" : 5, @@ -665,22 +711,30 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' }, "hits" : { "total" : 1000, - "max_score" : 1.0, + "max_score": null, "hits" : [ { + "_index" : "bank", + "_type" : "account", + "_id" : "0", + "sort": [0], + "_score": null, + "_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} + }, { "_index" : "bank", "_type" : "account", "_id" : "1", - "_score" : 1.0, "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} - }, { - "_index" : "bank", - "_type" : "account", - "_id" : "6", - "_score" : 1.0, "_source" : {"account_number":6,"balance":5686,"firstname":"Hattie","lastname":"Bond","age":36,"gender":"M","address":"671 Bristol Street","employer":"Netagy","email":"hattiebond@netagy.com","city":"Dante","state":"TN"} - }, { - "_index" : "bank", - "_type" : "account", - "_id" : "13", + "sort": [1], + "_score": null, + "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} + }, ... + ] + } +} -------------------------------------------------- +// TESTRESPONSE[s/"took" : 63/"took" : $body.took/] +// TESTRESPONSE[s/\.\.\./$body.hits.hits.2, $body.hits.hits.3, $body.hits.hits.4, $body.hits.hits.5, $body.hits.hits.6, $body.hits.hits.7, $body.hits.hits.8, $body.hits.hits.9/] + +//// It is important to understand that once you get your search results back, Elasticsearch is completely done with the request and does not maintain any kind of server-side resources or open cursors into your results. This is in stark contrast to many other platforms such as SQL wherein you may initially get a partial subset of your query results up-front and then you have to continuously go back to the server if you want to fetch (or page through) the rest of the results using some kind of stateful server-side cursor. @@ -690,52 +744,63 @@ Elasticsearch provides a JSON-style domain-specific language that you can use to Going back to our last example, we executed this query: -[source,sh] +[source,js] -------------------------------------------------- +GET /bank/_search { "query": { "match_all": {} } } -------------------------------------------------- +// CONSOLE +// TEST[continued] Dissecting the above, the `query` part tells us what our query definition is and the `match_all` part is simply the type of query that we want to run. The `match_all` query is simply a search for all documents in the specified index. -In addition to the `query` parameter, we also can pass other parameters to influence the search results. For example, the following does a `match_all` and returns only the first document: +In addition to the `query` parameter, we also can pass other parameters to +influence the search results. 
In the example in the section above we passed in +`sort`; here we pass in `size`: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match_all": {} }, "size": 1 -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] Note that if `size` is not specified, it defaults to 10. This example does a `match_all` and returns documents 11 through 20: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match_all": {} }, "from": 10, "size": 10 -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] The `from` parameter (0-based) specifies which document index to start from and the `size` parameter specifies how many documents to return starting at the from parameter. This feature is useful when implementing paging of search results. Note that if `from` is not specified, it defaults to 0. This example does a `match_all` and sorts the results by account balance in descending order and returns the top 10 (default size) documents. -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match_all": {} }, "sort": { "balance": { "order": "desc" } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] === Executing Searches @@ -743,14 +808,16 @@ Now that we have seen a few of the basic search parameters, let's dig in some mo This example shows how to return two fields, `account_number` and `balance` (inside of `_source`), from the search: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match_all": {} }, "_source": ["account_number", "balance"] -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] Note that the above example simply reduces the `_source` field. It will still only return one field named `_source` but within it, only the fields `account_number` and `balance` are included. @@ -760,51 +827,59 @@ Now let's move on to the query part.
Previously, we've seen how the `match_all` This example returns the account numbered 20: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match": { "account_number": 20 } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] This example returns all accounts containing the term "mill" in the address: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match": { "address": "mill" } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] This example returns all accounts containing the term "mill" or "lane" in the address: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match": { "address": "mill lane" } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] This example is a variant of `match` (`match_phrase`) that returns all accounts containing the phrase "mill lane" in the address: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "match_phrase": { "address": "mill lane" } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] Let's now introduce the <>. The `bool` query allows us to compose smaller queries into bigger queries using boolean logic. This example composes two `match` queries and returns all accounts containing "mill" and "lane" in the address: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "bool": { @@ -814,16 +889,18 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' ] } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] In the above example, the `bool must` clause specifies all the queries that must be true for a document to be considered a match. In contrast, this example composes two `match` queries and returns all accounts containing "mill" or "lane" in the address: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "bool": { @@ -833,16 +910,18 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' ] } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] In the above example, the `bool should` clause specifies a list of queries either of which must be true for a document to be considered a match. This example composes two `match` queries and returns all accounts that contain neither "mill" nor "lane" in the address: -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "bool": { @@ -852,8 +931,10 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' ] } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] In the above example, the `bool must_not` clause specifies a list of queries none of which must be true for a document to be considered a match. 
@@ -861,9 +942,9 @@ We can combine `must`, `should`, and `must_not` clauses simultaneously inside a This example returns all accounts of anybody who is 40 years old but don't live in ID(aho): -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "bool": { @@ -875,8 +956,10 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' ] } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] === Executing Filters @@ -888,9 +971,9 @@ The <> that we introduced in the previous sec This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000. -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "query": { "bool": { @@ -905,8 +988,10 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' } } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] Dissecting the above, the bool query contains a `match_all` query (the query part) and a `range` query (the filter part). We can substitute any other queries into the query and the filter parts. In the above case, the range query makes perfect sense since documents falling into the range all match "equally", i.e., no document is more relevant than another. @@ -918,9 +1003,9 @@ Aggregations provide the ability to group and extract statistics from your data. To start with, this example groups all the accounts by state, and then returns the top 10 (default) states sorted by count descending (also default): -[source,sh] +[source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' +GET /bank/_search { "size": 0, "aggs": { @@ -930,8 +1015,10 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d ' } } } -}' +} -------------------------------------------------- +// CONSOLE +// TEST[continued] In SQL, the above aggregation is similar in concept to: @@ -942,8 +1029,16 @@ SELECT state, COUNT(*) FROM bank GROUP BY state ORDER BY COUNT(*) DESC And the response (partially shown): -[source,sh] +[source,js] -------------------------------------------------- +{ + "took": 29, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, "hits" : { "total" : 1000, "max_score" : 0.0, @@ -951,51 +1046,55 @@ And the response (partially shown): }, "aggregations" : { "group_by_state" : { + "doc_count_error_upper_bound": 20, + "sum_other_doc_count": 770, "buckets" : [ { - "key" : "al", + "key" : "ID", + "doc_count" : 27 + }, { + "key" : "TX", + "doc_count" : 27 + }, { + "key" : "AL", + "doc_count" : 25 + }, { + "key" : "MD", + "doc_count" : 25 + }, { + "key" : "TN", + "doc_count" : 23 + }, { + "key" : "MA", "doc_count" : 21 }, { - "key" : "tx", - "doc_count" : 17 + "key" : "NC", + "doc_count" : 21 }, { - "key" : "id", - "doc_count" : 15 + "key" : "ND", + "doc_count" : 21 }, { - "key" : "ma", - "doc_count" : 15 + "key" : "ME", + "doc_count" : 20 }, { - "key" : "md", - "doc_count" : 15 - }, { - "key" : "pa", - "doc_count" : 15 - }, { - "key" : "dc", - "doc_count" : 14 - }, { - "key" : "me", - "doc_count" : 14 - }, { - "key" : "mo", - "doc_count" : 14 - }, { - "key" : "nd", - "doc_count" : 14 + "key" : "MO", + "doc_count" : 20 } ] } } } 
--------------------------------------------------
+// TESTRESPONSE[s/"took": 29/"took": $body.took/]

-We can see that there are 21 accounts in AL(abama), followed by 17 accounts in TX, followed by 15 accounts in ID(aho), and so forth.
+We can see that there are 27 accounts in `ID` (Idaho), followed by 27 accounts
+in `TX` (Texas), followed by 25 accounts in `AL` (Alabama), and so forth.

Note that we set `size=0` to not show search hits because we only want to see the aggregation results in the response.

Building on the previous aggregation, this example calculates the average account balance by state (again only for the top 10 states sorted by count in descending order):

-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
+GET /bank/_search
{
  "size": 0,
  "aggs": {
@@ -1012,16 +1111,18 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
    }
  }
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]

Notice how we nested the `average_balance` aggregation inside the `group_by_state` aggregation. This is a common pattern for all the aggregations. You can nest aggregations inside aggregations arbitrarily to extract pivoted summarizations that you require from your data.

Building on the previous aggregation, let's now sort on the average balance in descending order:

-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
+GET /bank/_search
{
  "size": 0,
  "aggs": {
@@ -1041,14 +1142,16 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
    }
  }
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]

This example demonstrates how we can group by age brackets (ages 20-29, 30-39, and 40-49), then by gender, and then finally get the average account balance, per age bracket, per gender:

-[source,sh]
+[source,js]
--------------------------------------------------
-curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
+GET /bank/_search
{
  "size": 0,
  "aggs": {
@@ -1086,8 +1189,10 @@ curl -XPOST 'localhost:9200/bank/_search?pretty' -d '
    }
  }
}
-}'
+}
--------------------------------------------------
+// CONSOLE
+// TEST[continued]

There are many other aggregation capabilities that we won't cover in detail here. The <> is a great starting point if you want to do further experimentation.

diff --git a/docs/reference/how-to.asciidoc b/docs/reference/how-to.asciidoc
index f41c3a3bb9c..d709e17bb4e 100644
--- a/docs/reference/how-to.asciidoc
+++ b/docs/reference/how-to.asciidoc
@@ -17,6 +17,8 @@ made.

include::how-to/general.asciidoc[]

+include::how-to/recipes.asciidoc[]
+
include::how-to/indexing-speed.asciidoc[]

include::how-to/search-speed.asciidoc[]

diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc
index 50187af5b28..b0bd5fef802 100644
--- a/docs/reference/how-to/indexing-speed.asciidoc
+++ b/docs/reference/how-to/indexing-speed.asciidoc
@@ -5,7 +5,7 @@
=== Use bulk requests

Bulk requests will yield much better performance than single-document index
-requests. In order to know the optimal size of a bulk request, you shoud run
+requests. In order to know the optimal size of a bulk request, you should run
a benchmark on a single node with a single shard. First try to index 100
documents at once, then 200, then 400, etc. doubling the number of documents
in a bulk request in every benchmark run.
When the indexing speed starts to @@ -32,7 +32,7 @@ When it happens, you should pause indexing a bit before trying again, ideally with randomized exponential backoff. Similarly to sizing bulk requests, only testing can tell what the optimal -number of workers is. This can be tested by progressivily increasing the +number of workers is. This can be tested by progressively increasing the number of workers until either I/O or CPU is saturated on the cluster. [float] @@ -58,7 +58,7 @@ original values. === Disable swapping You should make sure that the operating system is not swapping out the java -process by <>. +process by <>. [float] === Give memory to the filesystem cache diff --git a/docs/reference/how-to/recipes.asciidoc b/docs/reference/how-to/recipes.asciidoc new file mode 100644 index 00000000000..0bb158f88e8 --- /dev/null +++ b/docs/reference/how-to/recipes.asciidoc @@ -0,0 +1,304 @@ +[[recipes]] +== Recipes + +[float] +[[mixing-exact-search-with-stemming]] +=== Mixing exact search with stemming + +When building a search application, stemming is often a must as it is desirable +for a query on `skiing` to match documents that contain `ski` or `skis`. But +what if a user wants to search for `skiing` specifically? The typical way to do +this would be to use a <> in order to have the same +content indexed in two different ways: + +[source,js] +-------------------------------------------------- +PUT index +{ + "settings": { + "analysis": { + "analyzer": { + "english_exact": { + "tokenizer": "standard", + "filter": [ + "lowercase" + ] + } + } + } + }, + "mappings": { + "type": { + "properties": { + "body": { + "type": "text", + "analyzer": "english", + "fields": { + "exact": { + "type": "text", + "analyzer": "english_exact" + } + } + } + } + } + } +} + +PUT index/type/1 +{ + "body": "Ski resort" +} + +PUT index/type/2 +{ + "body": "A pair of skis" +} + +POST index/_refresh +-------------------------------------------------- +// CONSOLE + +With such a setup, searching for `ski` on `body` would return both documents: + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "simple_query_string": { + "fields": [ "body" ], + "query": "ski" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took": 2, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 2, + "max_score": 0.25811607, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "2", + "_score": 0.25811607, + "_source": { + "body": "A pair of skis" + } + }, + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.25811607, + "_source": { + "body": "Ski resort" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 2,/"took": "$body.took",/] + +On the other hand, searching for `ski` on `body.exact` would only return +document `1` since the analysis chain of `body.exact` does not perform +stemming. 
+ +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "simple_query_string": { + "fields": [ "body.exact" ], + "query": "ski" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took": 1, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0.25811607, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.25811607, + "_source": { + "body": "Ski resort" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 1,/"took": "$body.took",/] + +This is not something that is easy to expose to end users, as we would need to +have a way to figure out whether they are looking for an exact match or not and +redirect to the appropriate field accordingly. Also what to do if only parts of +the query need to be matched exactly while other parts should still take +stemming into account? + +Fortunately, the `query_string` and `simple_query_string` queries have a feature +that allows to solve exactly this problem: `quote_field_suffix`. It allows to +tell Elasticsearch that words that appear in between quotes should be redirected +to a different field, see below: + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "simple_query_string": { + "fields": [ "body" ], + "quote_field_suffix": ".exact", + "query": "\"ski\"" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took": 2, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0.25811607, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.25811607, + "_source": { + "body": "Ski resort" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 2,/"took": "$body.took",/] + +In that case, since `ski` was in-between quotes, it was searched on the +`body.exact` field due to the `quote_field_suffix` parameter, so only document +`1` matched. This allows users to mix exact search with stemmed search as they +like. + +[float] +[[consistent-scoring]] +=== Getting consistent scoring + +The fact that Elasticsearch operates with shards and replicas adds challenges +when it comes to having good scoring. + +[float] +==== Scores are not reproducible + +Say the same user runs the same request twice in a row and documents do not come +back in the same order both times, this is a pretty bad experience isn't it? +Unfortunately this is something that can happen if you have replicas +(`index.number_of_replicas` is greater than 0). The reason is that Elasticsearch +selects the shards that the query should go to in a round-robin fashion, so it +is quite likely if you run the same query twice in a row that it will go to +different copies of the same shard. + +Now why is it a problem? Index statistics are an important part of the score. +And these index statistics may be different across copies of the same shard +due to deleted documents. 
As you may know, when documents are deleted or updated,
+the old document is not immediately removed from the index, it is just marked
+as deleted and it will only be removed from disk the next time that the
+segment this old document belongs to is merged. However for practical reasons,
+those deleted documents are taken into account for index statistics. So imagine
+that the primary shard just finished a large merge that removed lots of deleted
+documents, then it might have index statistics that are sufficiently different
+from the replica (which still has plenty of deleted documents) so that scores
+are different too.
+
+The recommended way to work around this issue is to use a string that identifies
+the user that is logged in (a user id or session id for instance) as a
+<>. This ensures that all queries of a
+given user are always going to hit the same shards, so scores remain more
+consistent across queries.
+
+This workaround has another benefit: when two documents have the same score,
+they will be sorted by their internal Lucene doc id (which is unrelated to the
+`_id` or `_uid`) by default. However these doc ids could be different across
+copies of the same shard. So by always hitting the same shard, we would get
+more consistent ordering of documents that have the same scores.
+
+[float]
+==== Relevancy looks wrong
+
+If you notice that two documents with the same content get different scores or
+that an exact match is not ranked first, then the issue might be related to
+sharding. By default, Elasticsearch makes each shard responsible for producing
+its own scores. However since index statistics are an important contributor to
+the scores, this only works well if shards have similar index statistics. The
+assumption is that since documents are routed evenly to shards by default, then
+index statistics should be very similar and scoring would work as expected.
+However in the event that you either
+ - use routing at index time,
+ - query multiple _indices_,
+ - or have too little data in your index
+then there is a good chance that all shards that are involved in the search
+request do not have similar index statistics and relevancy could be bad.
+
+If you have a small dataset, the easiest way to work around this issue is to
+index everything into an index that has a single shard
+(`index.number_of_shards: 1`). Then index statistics will be the same for all
+documents and scores will be consistent.
+
+Otherwise the recommended way to work around this issue is to use the
+<> search type. This will make
+Elasticsearch perform an initial round trip to all involved shards, asking
+them for their index statistics relative to the query, then the coordinating
+node will merge those statistics and send the merged statistics alongside the
+request when asking shards to perform the `query` phase, so that shards can
+use these global statistics rather than their own statistics in order to do the
+scoring.
+
+In most cases, this additional round trip should be very cheap. However in the
+event that your query contains a very large number of fields/terms or fuzzy
+queries, beware that gathering statistics alone might not be cheap since all
+terms have to be looked up in the terms dictionaries in order to gather
+statistics.
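+
+For example, a minimal sketch of such a request, reusing the `index` created in
+the first recipe above (any query would work here):
+
+[source,js]
+--------------------------------------------------
+GET index/_search?search_type=dfs_query_then_fetch
+{
+  "query": {
+    "simple_query_string": {
+      "fields": [ "body" ],
+      "query": "ski"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]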
+

diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc
index 1833b45a9ba..7930ed573b4 100644
--- a/docs/reference/index-modules/similarity.asciidoc
+++ b/docs/reference/index-modules/similarity.asciidoc
@@ -174,9 +174,41 @@ implementation used for these two methods, while not changing the `default`, it
is possible to configure a similarity with the name `base`. This similarity
will then be used for the two methods.

-You can change the default similarity for all fields by putting the following setting into `elasticsearch.yml`:
+You can change the default similarity for all fields in an index when
+it is <>:

[source,js]
--------------------------------------------------
-index.similarity.default.type: classic
+PUT /my_index
+{
+  "settings": {
+    "index": {
+      "similarity": {
+        "default": {
+          "type": "classic"
+        }
+      }
+    }
+  }
+}
--------------------------------------------------

+If you want to change the default similarity after creating the index
+you must <> your index, send the following
+request and <> it again afterwards:
+
+[source,js]
+--------------------------------------------------
+PUT /my_index/_settings
+{
+  "settings": {
+    "index": {
+      "similarity": {
+        "default": {
+          "type": "classic"
+        }
+      }
+    }
+  }
+}
--------------------------------------------------

diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc
index 423968bb071..2ac33a1cdf5 100644
--- a/docs/reference/index-modules/slowlog.asciidoc
+++ b/docs/reference/index-modules/slowlog.asciidoc
@@ -24,7 +24,7 @@ index.search.slowlog.threshold.fetch.debug: 500ms
index.search.slowlog.threshold.fetch.trace: 200ms
--------------------------------------------------

-All of the above settings are _dynamic_ and can be set per-index.
+All of the above settings are _dynamic_ and are set per-index.

By default, none are enabled (set to `-1`). Levels (`warn`, `info`,
`debug`, `trace`) allow to control under which logging level the log
@@ -42,7 +42,7 @@ level.

The logging file is configured by default using the following
configuration (found in `log4j2.properties`):

-[source,yaml]
+[source,properties]
--------------------------------------------------
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
@@ -67,8 +67,8 @@ logger.index_search_slowlog_rolling.additivity = false

The indexing slow log, similar in functionality to the search slow
log. The log file name ends with `_index_indexing_slowlog.log`. Log and
-the thresholds are configured in the elasticsearch.yml file in the same
-way as the search slowlog. Index slowlog sample:
+the thresholds are configured in the same way as the search slowlog.
+Index slowlog sample:

[source,yaml]
--------------------------------------------------
@@ -80,23 +80,31 @@ index.indexing.slowlog.level: info
index.indexing.slowlog.source: 1000
--------------------------------------------------

-All of the above settings are _dynamic_ and can be set per-index.
+All of the above settings are _dynamic_ and are set per-index.

By default Elasticsearch will log the first 1000 characters of the _source in
the slowlog. You can change that with `index.indexing.slowlog.source`. Setting
it to `false` or `0` will skip logging the source entirely and setting it to
`true` will log the entire source regardless of size.
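
Because these settings are _dynamic_, they can also be changed on a live index
with the update settings API. A minimal sketch (`my_index` is a placeholder
index name):

[source,js]
--------------------------------------------------
PUT /my_index/_settings
{
  "index.indexing.slowlog.threshold.index.warn": "10s",
  "index.indexing.slowlog.source": 1000
}
--------------------------------------------------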
-The index slow log file is configured by default in the `logging.yml`
+The index slow log file is configured by default in the `log4j2.properties`
file:

-[source,yaml]
+[source,properties]
--------------------------------------------------
-index_indexing_slow_log_file:
-  type: dailyRollingFile
-  file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
-  datePattern: "'.'yyyy-MM-dd"
-  layout:
-    type: pattern
-    conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
--------------------------------------------------

diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc
index ee7fd8766fd..e33378ddd70 100644
--- a/docs/reference/index-modules/store.asciidoc
+++ b/docs/reference/index-modules/store.asciidoc
@@ -79,7 +79,7 @@ By default, elasticsearch completely relies on the operating system file system
cache for caching I/O operations. It is possible to set `index.store.preload`
in order to tell the operating system to load the content of hot index
files into memory upon opening. This setting accepts a comma-separated list of
-files extensions: all files whose extenion is in the list will be pre-loaded
+file extensions: all files whose extension is in the list will be pre-loaded
upon opening. This can be useful to improve search performance of an index,
especially when the host operating system is restarted, since this causes the
file system cache to be trashed. However note that this may slow down the
@@ -89,12 +89,12 @@ loaded into physical memory. This setting is best-effort only and may not work
at all depending on the store type and host operating system.

-The `index.store.pre_load` is a static setting that can either be set in the
+The `index.store.preload` is a static setting that can either be set in the
`config/elasticsearch.yml`:

[source,yaml]
---------------------------------
-index.store.pre_load: ["nvd", "dvd"]
+index.store.preload: ["nvd", "dvd"]
---------------------------------

or in the index settings at index creation time:

@@ -104,7 +104,7 @@ or in the index settings at index creation time:
PUT /my_index
{
  "settings": {
-    "index.store.pre_load": ["nvd", "dvd"]
+    "index.store.preload": ["nvd", "dvd"]
  }
}
---------------------------------

@@ -116,7 +116,7 @@ values to be loaded eagerly into physical memory. These are the first two
extensions to look at since elasticsearch performs random access on them.
A wildcard can be used in order to indicate that all files should be preloaded:
-`index.store.pre_load: ["*"]`. Note however that it is generally not useful to
+`index.store.preload: ["*"]`. Note however that it is generally not useful to
load all files into memory, in particular those for stored fields and term
vectors, so a better option might be to set it to
`["nvd", "dvd", "tim", "doc", "dim"]`, which will preload norms, doc values,

diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index c79ab86d114..317500b474f 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -3,7 +3,13 @@
:version: 6.0.0-alpha1
:major-version: 6.x
-:lucene_version: 6.2.0
+
+//////////
+release-state can be: released | prerelease | unreleased
+//////////
+
+:release-state: unreleased
+:lucene_version: 6.3.0
:branch: master
:jdk: 1.8.0_73
:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master

diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc
index 3c1eadfa07e..afb5088d85e 100644
--- a/docs/reference/indices.asciidoc
+++ b/docs/reference/indices.asciidoc
@@ -60,7 +60,6 @@ index settings, aliases, mappings, and index templates.
* <>
* <>
* <>
-* <>
--
@@ -114,5 +113,3 @@ include::indices/refresh.asciidoc[]
include::indices/forcemerge.asciidoc[]
-include::indices/upgrade.asciidoc[]
-

diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc
index dbb2c8f101a..0d9d60d4845 100644
--- a/docs/reference/indices/analyze.asciidoc
+++ b/docs/reference/indices/analyze.asciidoc
@@ -100,21 +100,6 @@ curl -XGET 'localhost:9200/test/_analyze' -d '
Will cause the analysis to happen based on the analyzer configured in the
mapping for `obj1.field1` (and if not, the default index analyzer).

-All parameters can also supplied as request parameters. For example:
-
-[source,js]
---------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&text=this+is+a+test'
---------------------------------------------------
-
-For backwards compatibility, we also accept the text parameter as the body of the request,
-provided it doesn't start with `{` :
-
-[source,js]
---------------------------------------------------
-curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filter=lowercase&char_filter=html_strip' -d 'this is a test'
---------------------------------------------------
-
=== Explain Analyze

If you want to get more advanced details, set `explain` to `true` (defaults to `false`). It will output all token attributes for each token.

diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc
index 6ee28e7b2f4..20ffdd44b30 100644
--- a/docs/reference/indices/rollover-index.asciidoc
+++ b/docs/reference/indices/rollover-index.asciidoc
@@ -34,7 +34,7 @@ POST /logs_write/_rollover <2>
// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
<1> Creates an index called `logs-000001` with the alias `logs_write`.
<2> If the index pointed to by `logs_write` was created 7 or more days ago, or
-    contains 1,000 or more documents, then the `logs-0002` index is created
+    contains 1,000 or more documents, then the `logs-000002` index is created
    and the `logs_write` alias is updated to point to `logs-000002`.
The above request might return the following response:

@@ -83,6 +83,84 @@ POST /my_alias/_rollover/my_new_index_name
// CONSOLE
// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/]

+[float]
+=== Using date math with the rollover API
+
+It can be useful to use <> to name the
+rollover index according to the date that the index rolled over, e.g.
+`logstash-2016.02.03`. The rollover API supports date math, but requires the
+index name to end with a dash followed by a number, e.g.
+`logstash-2016.02.03-1`, which is incremented every time the index is rolled
+over. For instance:
+
+[source,js]
+--------------------------------------------------
+# PUT /<logs-{now/d}-1> with URI encoding:
+PUT /%3Clogs-%7Bnow%2Fd%7D-1%3E <1>
+{
+  "aliases": {
+    "logs_write": {}
+  }
+}
+
+PUT logs_write/log/1
+{
+  "message": "a dummy log"
+}
+
+# Wait for a day to pass
+
+POST /logs_write/_rollover <2>
+{
+  "conditions": {
+    "max_docs": "1"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/now/2016.10.31||/]
+<1> Creates an index named with today's date (e.g. `logs-2016.10.31-1`)
+<2> Rolls over to a new index with today's date, e.g. `logs-2016.10.31-000002` if run immediately, or `logs-2016.11.01-000002` if run after 24 hours
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+GET _alias
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+[source,js]
+--------------------------------------------------
+{
+  "logs-2016.10.31-000002": {
+    "aliases": {
+      "logs_write": {}
+    }
+  },
+  "logs-2016.10.31-1": {
+    "aliases": {}
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+
+These indices can then be referenced as described in the
+<>. For example, to search
+over indices created in the last three days, you could do the following:
+
+[source,js]
+--------------------------------------------------
+# GET /<logs-{now/d}-*>,<logs-{now/d-1d}-*>,<logs-{now/d-2d}-*>/_search
+GET /%3Clogs-%7Bnow%2Fd%7D-*%3E%2C%3Clogs-%7Bnow%2Fd-1d%7D-*%3E%2C%3Clogs-%7Bnow%2Fd-2d%7D-*%3E/_search
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+// TEST[s/now/2016.10.31||/]
+
[float]
=== Defining the new index

@@ -144,5 +222,5 @@ POST /logs_write/_rollover?dry_run
=== Wait For Active Shards

Because the rollover operation creates a new index to rollover to, the
-<> setting on
+<> setting on
index creation applies to the rollover action as well.

diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc
index 6e2f7ce91f1..95cd32578a0 100644
--- a/docs/reference/indices/templates.asciidoc
+++ b/docs/reference/indices/templates.asciidoc
@@ -40,6 +40,9 @@ PUT _template/template_1
// CONSOLE
// TESTSETUP

+NOTE: Index templates support C-style /* */ block comments. Comments are allowed
+everywhere in the JSON document except before the initial opening curly bracket.
+
Defines a template named template_1, with a template pattern of `te*`.
The settings and mappings will be applied to any index name that matches
the `te*` template.

@@ -178,3 +181,50 @@ for indices of that start with `te*`, source will still be enabled.
Note, for mappings, the merging is "deep", meaning that specific
object/property based mappings can easily be added/overridden on higher
order templates, with lower order templates providing the basis.
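
As a quick sketch of this deep merging (the template names here are
hypothetical), a higher order template can add a field on top of the mappings
defined by a lower order template, so indices matching `te*` end up with both
the `created` and the `user` fields:

[source,js]
--------------------------------------------------
PUT /_template/base_mappings
{
  "template" : "te*",
  "order" : 0,
  "mappings" : {
    "type1" : {
      "properties" : {
        "created" : { "type" : "date" }
      }
    }
  }
}

PUT /_template/extra_mappings
{
  "template" : "te*",
  "order" : 1,
  "mappings" : {
    "type1" : {
      "properties" : {
        "user" : { "type" : "keyword" }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE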
+ +[float] +[[versioning-templates]] +=== Template Versioning + +Templates can optionally add a `version` number, which can be any integer value, +in order to simplify template management by external systems. The `version` +field is completely optional and it is meant solely for external management of +templates. To unset a `version`, simply replace the template without specifying +one. + +[source,js] +-------------------------------------------------- +PUT /_template/template_1 +{ + "template" : "*", + "order" : 0, + "settings" : { + "number_of_shards" : 1 + }, + "version": 123 +} +-------------------------------------------------- +// CONSOLE + +To check for the `version`, you can +<> +using `filter_path` to limit the response to just the `version`: + +[source,js] +-------------------------------------------------- +GET /_template/template_1?filter_path=*.version +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +This should give a small response that makes it both easy and inexpensive to parse: + +[source,js] +-------------------------------------------------- +{ + "template_1" : { + "version" : 123 + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/docs/reference/indices/upgrade.asciidoc b/docs/reference/indices/upgrade.asciidoc deleted file mode 100644 index 1b8e99f2125..00000000000 --- a/docs/reference/indices/upgrade.asciidoc +++ /dev/null @@ -1,123 +0,0 @@ -[[indices-upgrade]] -== Upgrade - -The upgrade API allows to upgrade one or more indices to the latest Lucene -format through an API. The upgrade process converts any segments written with -older formats. - -[IMPORTANT] -=================================================== - -**The upgrade API in its current form will not help you to migrate indices -created in Elasticsearch 1.x to 5.x.** - -The upgrade API rewrites an index in the latest Lucene format, but it still -retains the original data structures that were used when the index was first -created. For instance: - -* Doc-values on numeric fields used to use BinaryDocValues, but now use dedicated NumericDocValues. -* The parent-child feature has been completely rewritten to use a new data structure. -* Geo-point fields now require doc values and the Lucene index where, previously, they relied on in-memory calculations. - -**Migrating 1.x indices to 5.x** - -The only way to prepare an index created in 1.x for use in 5.x is to **reindex -your data** in a cluster running Elasticsearch 2.3.x, which you can do with -the new <>. - -The steps to do this are as follows: - -1. Create a new index (e.g. `new_index`) with the correct settings and - mappings. These can be retrieved from the old index with the - <> API. - -2. Reindex from `old_index` to `new_index` with the - <>. - -3. Retrieve a list of any aliases associated with the `old_index` using the - <>. - -4. Delete the `old_index` using the <>. - -5. Add an alias called `old_index` to the `new_index` along with any aliases - returned in step 3, using the <>. - -In the future, we plan to change the upgrade API to perform a reindex-in- -place. In other words, it would reindex data from `old_index` to `.old_index` -then atomically delete `old_index` and rename `.old_index` to `old_index`. 
- -=================================================== - - -[float] -=== Start an upgrade - -[source,sh] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_upgrade' --------------------------------------------------- - -NOTE: Upgrading is an I/O intensive operation, and is limited to processing a -single shard per node at a time. It also is not allowed to run at the same -time as an optimize/force-merge. - -This call will block until the upgrade is complete. If the http connection -is lost, the request will continue in the background, and -any new requests will block until the previous upgrade is complete. - -[float] -[[upgrade-parameters]] -==== Request Parameters - -The `upgrade` API accepts the following request parameters: - -[horizontal] -`only_ancient_segments`:: If true, only very old segments (from a -previous Lucene major release) will be upgraded. While this will do -the minimal work to ensure the next major release of Elasticsearch can -read the segments, it's dangerous because it can leave other very old -segments in sub-optimal formats. Defaults to `false`. - -[float] -=== Check upgrade status - -Use a `GET` request to monitor how much of an index is upgraded. This -can also be used prior to starting an upgrade to identify which -indices you want to upgrade at the same time. - -The `ancient` byte values that are returned indicate total bytes of -segments whose version is extremely old (Lucene major version is -different from the current version), showing how much upgrading is -necessary when you run with `only_ancient_segments=true`. - -[source,sh] --------------------------------------------------- -curl 'http://localhost:9200/twitter/_upgrade?pretty&human' --------------------------------------------------- - -[source,js] --------------------------------------------------- -{ - "size": "21gb", - "size_in_bytes": "21000000000", - "size_to_upgrade": "10gb", - "size_to_upgrade_in_bytes": "10000000000" - "size_to_upgrade_ancient": "1gb", - "size_to_upgrade_ancient_in_bytes": "1000000000" - "indices": { - "twitter": { - "size": "21gb", - "size_in_bytes": "21000000000", - "size_to_upgrade": "10gb", - "size_to_upgrade_in_bytes": "10000000000" - "size_to_upgrade_ancient": "1gb", - "size_to_upgrade_ancient_in_bytes": "1000000000" - } - } -} --------------------------------------------------- - -The level of details in the upgrade status command can be controlled by -setting `level` parameter to `cluster`, `index` (default) or `shard` levels. -For example, you can run the upgrade status command with `level=shard` to -get detailed upgrade information of each individual shard. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index a71fec1c3b3..be5ae9d2c56 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -90,6 +90,56 @@ For each returned pipeline, the source and the version are returned. The version is useful for knowing which version of the pipeline the node has. You can specify multiple IDs to return more than one pipeline. Wildcards are also supported. +[float] +[[versioning-pipelines]] +==== Pipeline Versioning + +Pipelines can optionally add a `version` number, which can be any integer value, +in order to simplify pipeline management by external systems. The `version` +field is completely optional and it is meant solely for external management of +pipelines. 
To unset a `version`, simply replace the pipeline without specifying +one. + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/my-pipeline-id +{ + "description" : "describe pipeline", + "version" : 123, + "processors" : [ + { + "set" : { + "field": "foo", + "value": "bar" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +To check for the `version`, you can +<> +using `filter_path` to limit the response to just the `version`: + +[source,js] +-------------------------------------------------- +GET /_ingest/pipeline/my-pipeline-id?filter_path=*.version +-------------------------------------------------- +// TEST[continued] + +This should give a small response that makes it both easy and inexpensive to parse: + +[source,js] +-------------------------------------------------- +{ + "my-pipeline-id" : { + "version" : 123 + } +} +-------------------------------------------------- +// TESTRESPONSE + [[delete-pipeline-api]] === Delete Pipeline API @@ -660,7 +710,7 @@ A node will not start if either of these plugins are not available. The <> can be used to fetch ingest usage statistics, globally and on a per pipeline basis. Useful to find out which pipelines are used the most or spent the most time on preprocessing. -[[append-procesesor]] +[[append-processor]] === Append Processor Appends one or more values to an existing array if the field already exists and it is an array. Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. @@ -1384,14 +1434,16 @@ caching see <>. .Script Options [options="header"] |====== -| Name | Required | Default | Description -| `lang` | no | - | The scripting language -| `file` | no | - | The script file to refer to -| `id` | no | - | The stored script id to refer to -| `inline` | no | - | An inline script to be executed -| `params` | no | - | Script Parameters +| Name | Required | Default | Description +| `lang` | no | "painless" | The scripting language +| `file` | no | - | The script file to refer to +| `id` | no | - | The stored script id to refer to +| `inline` | no | - | An inline script to be executed +| `params` | no | - | Script Parameters |====== +One of `file`, `id`, `inline` options must be provided in order to properly reference a script to execute. + You can access the current ingest document from within the script context by using the `ctx` variable. The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing @@ -1641,4 +1693,4 @@ pipeline should be used: -------------------------------------------------- The reason for this is that Ingest doesn't know how to automatically cast -a scalar field to an object field. \ No newline at end of file +a scalar field to an object field. diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index 21b0eadf683..468df64b1d4 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -353,7 +353,7 @@ remove it as described in the previous section. ===== Time-series -When doing time series analysis with elastisearch, it is common to have many +When doing time series analysis with elasticsearch, it is common to have many numeric fields that you will often aggregate on but never filter on. 
In such a case, you could disable indexing on those fields to save disk space and also maybe gain some indexing speed: diff --git a/docs/reference/mapping/params/analyzer.asciidoc b/docs/reference/mapping/params/analyzer.asciidoc index c075b662805..0b60451e02a 100644 --- a/docs/reference/mapping/params/analyzer.asciidoc +++ b/docs/reference/mapping/params/analyzer.asciidoc @@ -60,13 +60,15 @@ PUT /my_index } } -GET my_index/_analyze?field=text <3> +GET my_index/_analyze <3> { + "field": "text", "text": "The quick Brown Foxes." } -GET my_index/_analyze?field=text.english <4> +GET my_index/_analyze <4> { + "field": "text.english", "text": "The quick Brown Foxes." } -------------------------------------------------- diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 905a7ffe90a..3619017141f 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -147,17 +147,18 @@ The following tables lists all the defaults ISO formats supported: `date_hour` or `strict_date_hour`:: - A formatter that combines a full date and two digit hour of day. + A formatter that combines a full date and two digit hour of day: + `yyyy-MM-dd'T'HH`. `date_hour_minute` or `strict_date_hour_minute`:: A formatter that combines a full date, two digit hour of day, and two - digit minute of hour. + digit minute of hour: `yyyy-MM-dd'T'HH:mm`. `date_hour_minute_second` or `strict_date_hour_minute_second`:: A formatter that combines a full date, two digit hour of day, two digit - minute of hour, and two digit second of minute. + minute of hour, and two digit second of minute: `yyyy-MM-dd'T'HH:mm:ss`. `date_hour_minute_second_fraction` or `strict_date_hour_minute_second_fraction`:: @@ -183,16 +184,17 @@ The following tables lists all the defaults ISO formats supported: `hour` or `strict_hour`:: - A formatter for a two digit hour of day. + A formatter for a two digit hour of day: `HH` `hour_minute` or `strict_hour_minute`:: - A formatter for a two digit hour of day and two digit minute of hour. + A formatter for a two digit hour of day and two digit minute of hour: + `HH:mm`. `hour_minute_second` or `strict_hour_minute_second`:: A formatter for a two digit hour of day, two digit minute of hour, and two - digit second of minute. + digit second of minute: `HH:mm:ss`. `hour_minute_second_fraction` or `strict_hour_minute_second_fraction`:: @@ -258,27 +260,28 @@ The following tables lists all the defaults ISO formats supported: `weekyear` or `strict_weekyear`:: - A formatter for a four digit weekyear. + A formatter for a four digit weekyear: `xxxx`. `weekyear_week` or `strict_weekyear_week`:: - A formatter for a four digit weekyear and two digit week of weekyear. + A formatter for a four digit weekyear and two digit week of weekyear: + `xxxx-'W'ww`. `weekyear_week_day` or `strict_weekyear_week_day`:: A formatter for a four digit weekyear, two digit week of weekyear, and one - digit day of week. + digit day of week: `xxxx-'W'ww-e`. `year` or `strict_year`:: - A formatter for a four digit year. + A formatter for a four digit year: `yyyy`. `year_month` or `strict_year_month`:: - A formatter for a four digit year and two digit month of year. + A formatter for a four digit year and two digit month of year: `yyyy-MM`. `year_month_day` or `strict_year_month_day`:: A formatter for a four digit year, two digit month of year, and two digit - day of month. + day of month: `yyyy-MM-dd`. 
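+
+As a quick sketch of how these formats are used in practice (`my_index`,
+`my_type` and `timestamp` are hypothetical names), a format is referenced by
+name in the `format` parameter of a `date` field mapping:
+
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "mappings": {
+    "my_type": {
+      "properties": {
+        "timestamp": {
+          "type": "date",
+          "format": "strict_date_hour_minute_second"
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE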
diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 527e961960b..66b1d8a42cf 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -15,14 +15,14 @@ similarities. For more details about this expert options, see the The only similarities which can be used out of the box, without any further configuration are: -`classic`:: - The Default TF/IDF algorithm used by Elasticsearch and - Lucene. See {defguide}/practical-scoring-function.html[Lucene’s Practical Scoring Function] +`BM25`:: + The Okapi BM25 algorithm. The algorithm used by default in Elasticsearch and Lucene. + See {defguide}/pluggable-similarites.html[Pluggable Similarity Algorithms] for more information. -`BM25`:: - The Okapi BM25 algorithm. - See {defguide}/pluggable-similarites.html[Pluggable Similarity Algorithms] +`classic`:: + The TF/IDF algorithm which used to be the default in Elasticsearch and + Lucene. See {defguide}/practical-scoring-function.html[Lucene’s Practical Scoring Function] for more information. @@ -39,9 +39,9 @@ PUT my_index "default_field": { <1> "type": "text" }, - "bm25_field": { + "classic_field": { "type": "text", - "similarity": "BM25" <2> + "similarity": "classic" <2> } } } @@ -49,6 +49,5 @@ PUT my_index } -------------------------------------------------- // CONSOLE -<1> The `default_field` uses the `classic` similarity (ie TF/IDF). -<2> The `bm25_field` uses the `BM25` similarity. - +<1> The `default_field` uses the `BM25` similarity. +<2> The `classic_field` uses the `classic` similarity (ie TF/IDF). diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 1f0c76e1b93..4fe185fe463 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -175,7 +175,7 @@ this into a tree_levels setting of 26. ===== Performance considerations Elasticsearch uses the paths in the prefix tree as terms in the index -and in queries. The higher the levels is (and thus the precision), the +and in queries. The higher the level is (and thus the precision), the more terms are generated. Of course, calculating the terms, keeping them in memory, and storing them on disk all have a price. Especially with higher tree levels, indices can become extremely large even with a modest diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index fa260bbeff6..7c09ef46e55 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -6,7 +6,7 @@ codes, zip codes or tags. They are typically used for filtering (_Find me all blog posts where ++status++ is ++published++_), for sorting, and for aggregations. Keyword -fields are ony searchable by their exact value. +fields are only searchable by their exact value. If you need to index full text content such as email bodies or product descriptions, it is likely that you should rather use a <> field. diff --git a/docs/reference/migration/migrate_6_0.asciidoc b/docs/reference/migration/migrate_6_0.asciidoc index 735455d98c3..1debb5e2fc5 100644 --- a/docs/reference/migration/migrate_6_0.asciidoc +++ b/docs/reference/migration/migrate_6_0.asciidoc @@ -24,12 +24,16 @@ way to reindex old indices is to use the `reindex` API. 
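
For example, a minimal `reindex` call (with hypothetical index names) looks
like this:

[source,js]
--------------------------------------------------
POST _reindex
{
  "source": { "index": "old_index" },
  "dest": { "index": "new_index" }
}
--------------------------------------------------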
[float]
=== Also see:

+* <>
* <>
* <>
* <>
* <>
+* <>
* <>

+include::migrate_6_0/cat.asciidoc[]
+
include::migrate_6_0/rest.asciidoc[]

include::migrate_6_0/search.asciidoc[]

@@ -38,4 +42,6 @@ include::migrate_6_0/docs.asciidoc[]

include::migrate_6_0/cluster.asciidoc[]

+include::migrate_6_0/settings.asciidoc[]
+
include::migrate_6_0/plugins.asciidoc[]

diff --git a/docs/reference/migration/migrate_6_0/cat.asciidoc b/docs/reference/migration/migrate_6_0/cat.asciidoc
new file mode 100644
index 00000000000..013c0705991
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/cat.asciidoc
@@ -0,0 +1,7 @@
+[[breaking_60_cat_changes]]
+=== Cat API changes
+
+==== Unbounded queue size in cat thread pool
+
+Previously, if the queue size backing a thread pool was unbounded, the cat thread pool API would output an empty string in
+the queue_size column. This has been changed to output -1 so that the output is always present and always numeric.

diff --git a/docs/reference/migration/migrate_6_0/plugins.asciidoc b/docs/reference/migration/migrate_6_0/plugins.asciidoc
index ff8a75ab448..bf73dc10e94 100644
--- a/docs/reference/migration/migrate_6_0/plugins.asciidoc
+++ b/docs/reference/migration/migrate_6_0/plugins.asciidoc
@@ -3,5 +3,5 @@
==== Mapper attachments plugin

-* The mapper attachments plugin has been depecated in elasticsearch 5.0 and is now removed.
+* The mapper attachments plugin has been deprecated in elasticsearch 5.0 and is now removed.
You can use {plugins}/ingest-attachment.html[ingest attachment plugin] instead.

diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc
index 1e02df1f61f..e68682da9ed 100644
--- a/docs/reference/migration/migrate_6_0/rest.asciidoc
+++ b/docs/reference/migration/migrate_6_0/rest.asciidoc
@@ -4,6 +4,16 @@
==== Unquoted JSON

In previous versions of Elasticsearch, JSON documents were allowed to contain unquoted field names.
-This feature was removed in the 5.x series, but a backwards-compability layer was added via the
-system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compability layer
+This feature was removed in the 5.x series, but a backwards-compatibility layer was added via the
+system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compatibility layer
has been removed in Elasticsearch 6.0.0.
+
+==== Analyze API changes
+
+The deprecated request parameters and plain text in the request body have been removed. Define parameters in the request body instead.
+
+==== Indices exists API
+
+The `ignore_unavailable` and `allow_no_indices` options are no longer accepted
+as they could cause undesired results when their values differed from their
+defaults.

diff --git a/docs/reference/migration/migrate_6_0/settings.asciidoc b/docs/reference/migration/migrate_6_0/settings.asciidoc
new file mode 100644
index 00000000000..ea1162e005a
--- /dev/null
+++ b/docs/reference/migration/migrate_6_0/settings.asciidoc
@@ -0,0 +1,10 @@
+[[breaking_60_settings_changes]]
+=== Settings changes
+
+==== Snapshot settings
+
+The internal setting `cluster.routing.allocation.snapshot.relocation_enabled` that allowed shards with running snapshots to be reallocated to
+different nodes has been removed. Enabling this setting could cause allocation issues if a shard got allocated off a node and then
+reallocated back to that node while a snapshot was running.
+
+

diff --git a/docs/reference/modules/cluster/allocation_filtering.asciidoc b/docs/reference/modules/cluster/allocation_filtering.asciidoc
index 437f243c018..c173e4eb385 100644
--- a/docs/reference/modules/cluster/allocation_filtering.asciidoc
+++ b/docs/reference/modules/cluster/allocation_filtering.asciidoc
@@ -6,6 +6,31 @@ allocation of shards to nodes, cluster-level shard allocation filtering allows
you to allow or disallow the allocation of shards from *any* index to
particular nodes.

+The available _dynamic_ cluster settings are as follows, where `{attribute}`
+refers to an arbitrary node attribute:
+
+`cluster.routing.allocation.include.{attribute}`::
+
+    Allocate shards to a node whose `{attribute}` has at least one of the
+    comma-separated values.
+
+`cluster.routing.allocation.require.{attribute}`::
+
+    Only allocate shards to a node whose `{attribute}` has _all_ of the
+    comma-separated values.
+
+`cluster.routing.allocation.exclude.{attribute}`::
+
+    Do not allocate shards to a node whose `{attribute}` has _any_ of the
+    comma-separated values.
+
+These special attributes are also supported:
+
+[horizontal]
+`_name`:: Match nodes by node names
+`_ip`:: Match nodes by IP addresses (the IP address associated with the hostname)
+`_host`:: Match nodes by hostnames
+
The typical use case for cluster-wide shard allocation filtering is when you
want to decommission a node, and you would like to move the shards from that
node to other nodes in the cluster before shutting it down.

@@ -27,35 +52,8 @@ NOTE: Shards will only be relocated if it is possible to do so without breaking
another routing constraint, such as never allocating a primary and replica
shard to the same node.

-Cluster-wide shard allocation filtering works in the same way as index-level
-shard allocation filtering (see <> for details).
-
-The available _dynamic_ cluster settings are as follows, where `{attribute}`
-refers to an arbitrary node attribute.:
-
-`cluster.routing.allocation.include.{attribute}`::
-
-    Assign the index to a node whose `{attribute}` has at least one of the
-    comma-separated values.
-
-`cluster.routing.allocation.require.{attribute}`::
-
-    Assign the index to a node whose `{attribute}` has _all_ of the
-    comma-separated values.
-
-`cluster.routing.allocation.exclude.{attribute}`::
-
-    Assign the index to a node whose `{attribute}` has _none_ of the
-    comma-separated values.
-
-These special attributes are also supported:
-
-[horizontal]
-`_name`:: Match nodes by node name
-`_ip`:: Match nodes by IP address (the IP address associated with the hostname)
-`_host`:: Match nodes by hostname
-
-All attribute values can be specified with wildcards, eg:
+In addition to listing multiple values as a comma-separated list, all
+attribute values can be specified with wildcards, eg:

[source,js]
------------------------

diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc
index f1439adc8b6..082567053b2 100644
--- a/docs/reference/modules/discovery/zen.asciidoc
+++ b/docs/reference/modules/discovery/zen.asciidoc
@@ -70,7 +70,7 @@ active master node will step down and a new master election will begin.

This setting must be set to a quorum of your master eligible nodes. It is recommended to avoid
having only two master eligible nodes, since a quorum of two is two. Therefore, a loss
-of either master node will result in an inoperable cluster.
+of either master eligible node will result in an inoperable cluster.
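+
+For example, a minimal `elasticsearch.yml` sketch for a cluster with three
+master eligible nodes:
+
+[source,yaml]
+--------------------------------------------------
+# quorum of 3 master eligible nodes: (3 / 2) + 1 = 2
+discovery.zen.minimum_master_nodes: 2
+--------------------------------------------------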
[float]
[[fault-detection]]

diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index e1cc3e0b866..5ca8acf37cf 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -8,7 +8,7 @@ then you have a cluster of one node.

Every node in the cluster can handle <> and <>
traffic by default. The transport layer
-is used exclusively for communication between nodes and between nodes and the
+is used exclusively for communication between nodes and the
{javaclient}/transport-client.html[Java `TransportClient`]; the HTTP layer is
used only by external REST clients.

@@ -287,7 +287,7 @@ machine. In production, however, it is recommended to run only one node of Elast

By default, Elasticsearch is configured to prevent more than one node from sharing the same data
path. To allow for more than one node (e.g., on your development machine), use the setting
-`node.max_local_storage_nodes` and set this to a positve integer larger than one.
+`node.max_local_storage_nodes` and set this to a positive integer larger than one.

WARNING: Never run different node types (i.e. master, data) from the same data directory. This can
lead to unexpected data loss.

diff --git a/docs/reference/modules/scripting/groovy.asciidoc b/docs/reference/modules/scripting/groovy.asciidoc
index aaacd85f243..24974e5733f 100644
--- a/docs/reference/modules/scripting/groovy.asciidoc
+++ b/docs/reference/modules/scripting/groovy.asciidoc
@@ -3,7 +3,7 @@

deprecated[5.0.0,Groovy will be replaced by the new scripting language <>]

-Groovy is the default scripting language available in Elasticsearch. Although
+Groovy is available in Elasticsearch by default. Although
limited by the <>, it is not a
sandboxed language and only `file` scripts may be used by default.

diff --git a/docs/reference/modules/scripting/painless-syntax.asciidoc b/docs/reference/modules/scripting/painless-syntax.asciidoc
index 1191facc369..fa8c1e60aa2 100644
--- a/docs/reference/modules/scripting/painless-syntax.asciidoc
+++ b/docs/reference/modules/scripting/painless-syntax.asciidoc
@@ -28,6 +28,23 @@ String constants can be declared with single quotes, to avoid escaping horrors w
def mystring = 'foo';
---------------------------------------------------------

+[float]
+[[painless-arrays]]
+==== Arrays
+
+Arrays can be subscripted starting from `0` for traditional array access or with
+negative numbers to start from the back of the array. So the following
+returns `2`.
+
+[source,painless]
+---------------------------------------------------------
+int[] x = new int[5];
+x[0]++;
+x[-5]++;
+return x[0];
+---------------------------------------------------------
+
+
[float]
[[painless-lists]]
==== List

@@ -39,11 +56,13 @@ Lists can be created explicitly (e.g. `new ArrayList()`) or initialized similar
def list = [1,2,3];
---------------------------------------------------------

-Lists can also be accessed similar to arrays: they support subscript and `.length`:
+Lists can also be accessed similar to arrays.
They support `.length` and
+subscripts, including negative subscripts to read from the back of the list:

[source,painless]
---------------------------------------------------------
def list = [1,2,3];
+list[-1] = 5;
return list[0]
---------------------------------------------------------

@@ -111,6 +130,37 @@ using these characters:

|`x` | COMMENTS (aka extended) | `'a' ==~ /a #comment/x`
|=======================================================================

+[float]
+[[painless-deref]]
+=== Dereferences
+
+Like lots of languages, Painless uses `.` to reference fields and call methods:
+
+[source,painless]
+---------------------------------------------------------
+String foo = 'foo';
+TypeWithGetterOrPublicField bar = new TypeWithGetterOrPublicField();
+return foo.length() + bar.x
+---------------------------------------------------------
+
+Like Groovy, Painless uses `?.` to perform null-safe references, with the
+result being `null` if the left hand side is null:
+
+[source,painless]
+---------------------------------------------------------
+String foo = null;
+return foo?.length() // Returns null
+---------------------------------------------------------
+
+Unlike Groovy, Painless doesn't support writing to null values with this
+operator:
+
+[source,painless]
+---------------------------------------------------------
+TypeWithSetterOrPublicField foo = null;
+foo?.x = 'bar' // Compile error
+---------------------------------------------------------
+
[float]
[[painless-operators]]
=== Operators

diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc
index 17ab4a8180a..b09a54e5c46 100644
--- a/docs/reference/modules/scripting/using.asciidoc
+++ b/docs/reference/modules/scripting/using.asciidoc
@@ -8,12 +8,12 @@ the same pattern:

-------------------------------------
"script": {
  "lang": "...", <1>
-  "inline" | "id" | "file": "...", <2>
+  "inline" | "stored" | "file": "...", <2>
  "params": { ... } <3>
}
-------------------------------------
<1> The language the script is written in, which defaults to `painless`.
-<2> The script itself which may be specfied as `inline`, `id`, or `file`.
+<2> The script itself which may be specified as `inline`, `stored`, or `file`.
<3> Any named parameters that should be passed into the script.

For example, the following script is used in a search request to return a

@@ -211,7 +211,7 @@ GET _scripts/groovy/calculate-score
// CONSOLE
// TEST[continued]

-Stored scripts can be used by specifying the `lang` and `id` parameters as follows:
+Stored scripts can be used by specifying the `lang` and `stored` parameters as follows:

[source,js]
--------------------------------------------------
@@ -221,7 +221,7 @@ GET _search
  "script": {
    "script": {
      "lang": "groovy",
-      "id": "calculate-score",
+      "stored": "calculate-score",
      "params": {
        "my_modifier": 2
      }

diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
index 9fd8e069480..aa6846d1e8a 100644
--- a/docs/reference/modules/snapshots.asciidoc
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -1,9 +1,20 @@
[[modules-snapshots]]
== Snapshot And Restore

-The snapshot and restore module allows to create snapshots of individual indices or an entire cluster into a remote
-repository. At the time of the initial release only shared file system repository was supported, but now a range of
-backends are available via officially supported repository plugins.
+The snapshot and restore module allows you to create snapshots of individual
+indices or an entire cluster into a remote repository like a shared file system,
+S3, or HDFS. These snapshots are great for backups because they can be restored
+relatively quickly, but they are not archival because they can only be restored
+to versions of Elasticsearch that can read the index. That means that:
+
+* A snapshot of an index created in 2.x can be restored to 5.x.
+* A snapshot of an index created in 1.x can be restored to 2.x.
+* A snapshot of an index created in 1.x can **not** be restored to 5.x.
+
+To restore a snapshot of an index created in 1.x to 5.x, you can restore it to
+a 2.x cluster and use <> to rebuild
+the index in a 5.x cluster. This is as time-consuming as restoring from
+archival copies of the original data.

[float]
=== Repositories
@@ -277,7 +288,7 @@ GET /_snapshot/my_backup/_all
-----------------------------------
// CONSOLE

-The command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unvailable` can be used to
+The command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to
return all snapshots that are currently available.

A currently running snapshot can be retrieved using the following command:

@@ -516,5 +527,3 @@ well as the global metadata were readable. The restore operation requires the gl
the index level blocks are ignored during restore because indices are essentially recreated during restore.
Please note that a repository content is not part of the cluster and therefore cluster blocks don't affect internal
repository operations such as listing or deleting snapshots from an already registered repository.
-
-
diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc
index a333312e0fb..4e8b5c61efd 100644
--- a/docs/reference/modules/threadpool.asciidoc
+++ b/docs/reference/modules/threadpool.asciidoc
@@ -118,14 +118,30 @@ thread_pool:
[[processors]]
=== Processors setting

The number of processors is automatically detected, and the thread pool
-settings are automatically set based on it. Sometimes, the number of processors
-are wrongly detected, in such cases, the number of processors can be
-explicitly set using the `processors` setting.
+settings are automatically set based on it. In some cases it can be
+useful to override the number of detected processors. This can be done
+by explicitly setting the `processors` setting.

[source,yaml]
--------------------------------------------------
processors: 2
--------------------------------------------------

+There are a few use-cases for explicitly overriding the `processors`
+setting:
+
+. If you are running multiple instances of Elasticsearch on the same
+host but want Elasticsearch to size its thread pools as if it only has a
+fraction of the CPU, you should override the `processors` setting to the
+desired fraction (e.g., if you're running two instances of Elasticsearch
+on a 16-core machine, set `processors` to 8). Note that this is an
+expert-level use-case and there's a lot more involved than just setting
+the `processors` setting as there are other considerations like changing
+the number of garbage collector threads, pinning processes to cores,
+etc.
+. Sometimes the number of processors is wrongly detected and in such
+cases explicitly setting the `processors` setting will work around such
+issues.
+
In order to check the number of processors detected, use the nodes info
API with the `os` flag. 
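+
+For example, the following request returns just the `os` section of the nodes
+info, which includes the number of processors that were detected (shown here
+as a minimal sketch):
+
+[source,js]
+--------------------------------------------------
+GET /_nodes/os
+--------------------------------------------------
+// CONSOLE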
diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index bf9b6e8f9d4..231666d6bed 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -80,13 +80,6 @@ The following parameters can be configured like that * `tcp_send_buffer_size`: Configures the send buffer size of the socket * `tcp_receive_buffer_size`: Configures the receive buffer size of the socket -[float] -=== Local Transport - -This is a handy transport to use when running integration tests within -the JVM. It is automatically enabled when using -`NodeBuilder#local(true)`. - [float] === Transport Tracer diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 3072372d179..6e83dd927e0 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -318,7 +318,7 @@ In the above example, the field is a <> and origin can be math (for example `now-1h`) is supported for origin. `scale`:: - Required for all types. Defines the distance from origin + offest at which the computed + Required for all types. Defines the distance from origin + offset at which the computed score will equal `decay` parameter. For geo fields: Can be defined as number+unit (1km, 12m,...). Default unit is meters. For date fields: Can to be defined as a number+unit ("1h", "10d",...). Default unit is milliseconds. For numeric field: Any number. diff --git a/docs/reference/query-dsl/fuzzy-query.asciidoc b/docs/reference/query-dsl/fuzzy-query.asciidoc index f320e81b579..4df30dec2f1 100644 --- a/docs/reference/query-dsl/fuzzy-query.asciidoc +++ b/docs/reference/query-dsl/fuzzy-query.asciidoc @@ -24,6 +24,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:fuzzy query is deprecated. Instead use the [match] query with fuzziness parameter] Or with more advanced settings: @@ -45,6 +46,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:fuzzy query is deprecated. Instead use the [match] query with fuzziness parameter] [float] ===== Parameters diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 40debb57105..1307d0184f1 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -92,7 +92,7 @@ Default is `memory`. 
==== Accepted Formats

In much the same way the geo_point type can accept different
-representation of the geo point, the filter can accept it as well:
+representations of the geo point, the filter can accept it as well:

[float]
===== Lat Lon As Properties
diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc
index c6496eac39d..4591ccf5c9e 100644
--- a/docs/reference/query-dsl/geo-distance-query.asciidoc
+++ b/docs/reference/query-dsl/geo-distance-query.asciidoc
@@ -69,7 +69,7 @@ GET /my_locations/location/_search
==== Accepted Formats

In much the same way the `geo_point` type can accept different
-representation of the geo point, the filter can accept it as well:
+representations of the geo point, the filter can accept it as well:

[float]
===== Lat Lon As Properties
diff --git a/docs/reference/query-dsl/joining-queries.asciidoc b/docs/reference/query-dsl/joining-queries.asciidoc
index cfbbf5360b6..6a467fe539a 100644
--- a/docs/reference/query-dsl/joining-queries.asciidoc
+++ b/docs/reference/query-dsl/joining-queries.asciidoc
@@ -7,7 +7,7 @@ which are designed to scale horizontally.

<>::

-Documents may contains fields of type <>. These
+Documents may contain fields of type <>. These
fields are used to index arrays of objects, where each object can be queried
(with the `nested` query) as an independent document.

diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc
index 60477d6e28a..70f976d1d56 100644
--- a/docs/reference/query-dsl/query-string-query.asciidoc
+++ b/docs/reference/query-dsl/query-string-query.asciidoc
@@ -40,10 +40,6 @@ with default operator of `AND`, the same query is translated to

|`allow_leading_wildcard` |When set, `*` or `?` are allowed as the first
character. Defaults to `true`.

-|`lowercase_expanded_terms` |Whether terms of wildcard, prefix, fuzzy,
-and range queries are to be automatically lower-cased or not (since they
-are not analyzed). Default it `true`.
-
|`enable_position_increments` |Set to `true` to enable position increments
in result queries. Defaults to `true`.

@@ -61,12 +57,12 @@ phrase matches are required. Default value is `0`.

|`boost` |Sets the boost value of the query. Defaults to `1.0`.

+|`auto_generate_phrase_queries` |Defaults to `false`.
+
|`analyze_wildcard` |By default, wildcards terms in a query string are
not analyzed. By setting this value to `true`, a best effort will be
made to analyze those as well.

-|`auto_generate_phrase_queries` |Defaults to `false`.
-
|`max_determinized_states` |Limit on how many automaton states regexp
queries are allowed to create. This protects against too-difficult
(e.g. exponentially hard) regexps. Defaults to 10000.

@@ -80,11 +76,23 @@ both>>.
|`lenient` |If set to `true` will cause format based failures (like
providing text to a numeric field) to be ignored.

-|`locale` | Locale that should be used for string conversions.
-Defaults to `ROOT`.
-
|`time_zone` | Time Zone to be applied to any range query related to dates. See also
http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[JODA timezone].
+
+|`quote_field_suffix` | A suffix to append to fields for quoted parts of
+the query string. This allows you to use a field that has a different analysis chain
+for exact matching. See <> for a
+comprehensive example.
+
+|`split_on_whitespace` |Whether query text should be split on whitespace prior to analysis.
+ Instead, the query parser parses around only real 'operators'.
+ Defaults to `false`.
+
+|`all_fields` | Perform the query on all fields detected in the mapping that can
+be queried. Will be used by default when the `_all` field is disabled and no
+`default_field` is specified (either in the index settings or in the request
+body) and no `fields` are specified.
+
|=======================================================================

When a multi term query is being generated, one can control how it gets
@@ -99,8 +107,9 @@ When not explicitly specifying the field to search
on in the query string syntax, the `index.query.default_field` will be
used to derive which field to search on. It defaults to `_all` field.

-So, if `_all` field is disabled, it might make sense to change it to set
-a different default field.
+If the `_all` field is disabled, the `query_string` query will automatically
+attempt to determine the existing fields in the index's mapping that are
+queryable, and perform the search on those fields.

[float]
==== Multi Field
diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc
index 9e847102469..9c900959196 100644
--- a/docs/reference/query-dsl/query-string-syntax.asciidoc
+++ b/docs/reference/query-dsl/query-string-syntax.asciidoc
@@ -61,12 +61,15 @@ they match.
Leading wildcards can be disabled by setting `allow_leading_wildcard` to `false`.
=======

-Wildcarded terms are not analyzed by default -- they are lowercased
-(`lowercase_expanded_terms` defaults to `true`) but no further analysis
-is done, mainly because it is impossible to accurately analyze a word that
-is missing some of its letters. However, by setting `analyze_wildcard` to
-`true`, an attempt will be made to analyze wildcarded words before searching
-the term list for matching terms.
+Only parts of the analysis chain that operate at the character level are
+applied. So for instance, if the analyzer performs both lowercasing and
+stemming, only the lowercasing will be applied: it would be wrong to perform
+stemming on a word that is missing some of its letters.
+
+By setting `analyze_wildcard` to true, queries that end with a `*` will be
+analyzed and a boolean query will be built out of the different tokens, by
+ensuring exact matches on the first N-1 tokens, and prefix match on the last
+token.

===== Regular expressions

@@ -282,8 +285,8 @@ A space may also be a reserved character. For instance, if you have a
synonym list which converts `"wi fi"` to `"wifi"`, a `query_string` search
for `"wi fi"` would fail. The query string parser would interpret your
query as a search for `"wi OR fi"`, while the token stored in your
-index is actually `"wifi"`. Escaping the space will protect it from
-being touched by the query string parser: `"wi\ fi"`.
+index is actually `"wifi"`. The option `split_on_whitespace=false` will protect it from
+being touched by the query string parser and will let the analysis run on the entire input (`"wi fi"`).
****

===== Empty Query
diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc
index 68ca5912458..6a929ba98d5 100644
--- a/docs/reference/query-dsl/regexp-syntax.asciidoc
+++ b/docs/reference/query-dsl/regexp-syntax.asciidoc
@@ -203,7 +203,7 @@ For string `"abcd"`:

===== Optional operators

These operators are available by default as the `flags` parameter defaults to `ALL`. 
-Different flag combinations (concatened with `"\"`) can be used to enable/disable
+Different flag combinations (concatenated with `"|"`) can be used to enable/disable
specific operators:

{
diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc
index 796f2517fea..c67ba5cd73e 100644
--- a/docs/reference/query-dsl/simple-query-string-query.asciidoc
+++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc
@@ -44,18 +44,11 @@ creating composite queries.

|`flags` |Flags specifying which features of the `simple_query_string` to
enable. Defaults to `ALL`.

-|`lowercase_expanded_terms` | Whether terms of prefix and fuzzy queries should
-be automatically lower-cased or not (since they are not analyzed). Defaults to
-`true`.
-
|`analyze_wildcard` | Whether terms of prefix queries should be automatically
analyzed or not. If `true` a best effort will be made to analyze the prefix. However,
some analyzers will not be able to provide meaningful results
based just on the prefix of a term. Defaults to `false`.

-|`locale` | Locale that should be used for string conversions.
-Defaults to `ROOT`.
-
|`lenient` | If set to `true` will cause format based failures (like
providing text to a numeric field) to be ignored.

@@ -63,6 +56,15 @@ Defaults to `ROOT`.
document to be returned. See the <> documentation for the full
list of options.
+
+|`quote_field_suffix` | A suffix to append to fields for quoted parts of
+the query string. This allows you to use a field that has a different analysis chain
+for exact matching. See <> for a
+comprehensive example.
+
+|`all_fields` | Perform the query on all fields detected in the mapping that can
+be queried. Will be used by default when the `_all` field is disabled, no
+`default_field` is specified in the index settings, and no `fields` are specified.
|=======================================================================

[float]
@@ -87,8 +89,10 @@ When not explicitly specifying the field to search
on in the query string syntax, the `index.query.default_field` will be
used to derive which field to search on. It defaults to `_all` field.

-So, if `_all` field is disabled, it might make sense to change it to set
-a different default field.
+If the `_all` field is disabled and no `fields` are specified in the request,
+the `simple_query_string` query will automatically attempt to determine the
+existing fields in the index's mapping that are queryable, and perform the
+search on those fields.

[float]
==== Multi Field
diff --git a/docs/reference/query-dsl/template-query.asciidoc b/docs/reference/query-dsl/template-query.asciidoc
index b4b00e5babd..2d3b5724d49 100644
--- a/docs/reference/query-dsl/template-query.asciidoc
+++ b/docs/reference/query-dsl/template-query.asciidoc
@@ -108,7 +108,7 @@ GET /_search
{
"query": {
"template": {
- "id": "my_template", <1>
+ "stored": "my_template", <1>
"params" : {
"query_string" : "all about search"
}
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc
index 570568b43a0..d6870f81a10 100644
--- a/docs/reference/redirects.asciidoc
+++ b/docs/reference/redirects.asciidoc
@@ -10,6 +10,12 @@ The `_shutdown` API has been removed. Instead, setup Elasticsearch to run as a
service (see <>, <>, or <>) or use the `-p`
command line option to <>.

+[role="exclude",id="indices-upgrade"]
+=== Upgrade API
+
+The `_upgrade` API is no longer useful and will be removed. Instead, see
+<>. 
+
[role="exclude",id="docs-bulk-udp"]
=== Bulk UDP API
diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc
index 61d807cc212..7a26ee2cf60 100644
--- a/docs/reference/search.asciidoc
+++ b/docs/reference/search.asciidoc
@@ -92,6 +92,19 @@ timeout. The setting key is `search.default_search_timeout` and can be
set using the <> endpoints. Setting this value to `-1` resets the
global search timeout to no timeout.

+[float]
+[[global-search-cancellation]]
+== Search Cancellation
+
+Searches can be cancelled using the standard <>
+mechanism. By default, a running search only checks whether it has been
+cancelled on segment boundaries, so cancellation can be delayed by large
+segments. Search cancellation responsiveness can be improved by setting
+the dynamic cluster-level setting `search.low_level_cancellation` to `true`.
+However, this comes with the additional overhead of more frequent cancellation
+checks, which can be noticeable on large, fast-running search queries. Changing this
+setting only affects searches that start after the change is made.
+
--
include::search/search.asciidoc[]
diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc
index 859455e89b7..d4117e4e96e 100644
--- a/docs/reference/search/count.asciidoc
+++ b/docs/reference/search/count.asciidoc
@@ -74,9 +74,6 @@ query.

|`lenient` |If set to true will cause format based failures (like
providing text to a numeric field) to be ignored. Defaults to false.

-|`lowercase_expanded_terms` |Should terms be automatically lowercased or
-not. Defaults to `true`.
-
|`analyze_wildcard` |Should wildcard and prefix queries be analyzed or
not. Defaults to `false`.

diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc
index 558071eedcf..1291af702f5 100644
--- a/docs/reference/search/explain.asciidoc
+++ b/docs/reference/search/explain.asciidoc
@@ -68,7 +68,7 @@ This will yield the following result:
"details" : [ ]
}, {
"value" : 1.0,
- "description" : "_type:tweet, product of:",
+ "description" : "*:*, product of:",
"details" : [ {
"value" : 1.0, "description" : "boost", "details" : [ ] }, {
"value" : 1.0, "description" : "queryNorm", "details" : [ ]
} ]
@@ -136,10 +136,6 @@ This will yield the same result as the previous request.

Should wildcard and prefix queries be analyzed or not. Defaults to
false.

-`lowercase_expanded_terms`::
- Should terms be automatically lowercased
- or not. Defaults to true.
-
`lenient`::
If set to true will cause format based failures (like providing text
to a numeric field) to be ignored. Defaults to false. 
diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 0a03b322858..52b744d30e9 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -118,9 +118,16 @@ This will yield the following result: "rewrite_time": 51443, "collector": [ { - "name": "SimpleTopScoreDocCollector", - "reason": "search_top_hits", - "time": "0.06989100000ms" + "name": "CancellableCollector", + "reason": "search_cancelled", + "time": "0.3043110000ms", + "children": [ + { + "name": "SimpleTopScoreDocCollector", + "reason": "search_top_hits", + "time": "0.03227300000ms" + } + ] } ] } @@ -150,7 +157,8 @@ This will yield the following result: // TESTRESPONSE[s/"build_scorer": 42602/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.build_scorer/] // TESTRESPONSE[s/"create_weight": 89323/"create_weight": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.create_weight/] // TESTRESPONSE[s/"next_doc": 2852/"next_doc": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.next_doc/] -// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] +// TESTRESPONSE[s/"time": "0.3043110000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] +// TESTRESPONSE[s/"time": "0.03227300000ms"/"time": $body.profile.shards.0.searches.0.collector.0.children.0.time/] // Sorry for this mess.... <1> Search results are returned, but were omitted here for brevity @@ -390,21 +398,30 @@ Looking at the previous example: [source,js] -------------------------------------------------- "collector": [ - { - "name": "SimpleTopScoreDocCollector", - "reason": "search_top_hits", - "time": "0.06989100000ms" - } + { + "name": "CancellableCollector", + "reason": "search_cancelled", + "time": "0.3043110000ms", + "children": [ + { + "name": "SimpleTopScoreDocCollector", + "reason": "search_top_hits", + "time": "0.03227300000ms" + } + ] + } ] -------------------------------------------------- // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": $body.profile.shards.0.searches.0.query,\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time,/] // TESTRESPONSE[s/]$/]}], "aggregations": []}]}}/] -// TESTRESPONSE[s/"time": "0.06989100000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] +// TESTRESPONSE[s/"time": "0.3043110000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] +// TESTRESPONSE[s/"time": "0.03227300000ms"/"time": $body.profile.shards.0.searches.0.collector.0.children.0.time/] -We see a single collector named `SimpleTopScoreDocCollector`. This is the default "scoring and sorting" Collector -used by Elasticsearch. The `"reason"` field attempts to give a plain english description of the class name. The +We see a single collector named `SimpleTopScoreDocCollector` wrapped into `CancellableCollector`. `SimpleTopScoreDocCollector` is the default "scoring and sorting" +`Collector` used by Elasticsearch. The `"reason"` field attempts to give a plain english description of the class name. The `"time"` is similar to the time in the Query tree: a wall-clock time inclusive of all children. Similarly, `children` lists -all sub-collectors. +all sub-collectors. 
The `CancellableCollector` that wraps `SimpleTopScoreDocCollector` is used by elasticsearch to detect if the current +search was cancelled and stop collecting documents as soon as it occurs. It should be noted that Collector times are **independent** from the Query times. They are calculated, combined and normalized independently! Due to the nature of Lucene's execution, it is impossible to "merge" the times diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 2e7bc9f1805..73ade7a47d6 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -15,14 +15,14 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {} + "comment" : {} } } } -------------------------------------------------- // CONSOLE -In the above case, the `content` field will be highlighted for each +In the above case, the `comment` field will be highlighted for each search hit (there will be another element in each search hit, called `highlight`, which includes the highlighted fields and the highlighted fragments). @@ -71,14 +71,14 @@ natural languages, not as well with fields containing for instance html markup * Treats the document as the whole corpus, and scores individual sentences as if they were documents in this corpus, using the BM25 algorithm -Here is an example of setting the `content` field in the index mapping to allow for +Here is an example of setting the `comment` field in the index mapping to allow for highlighting using the postings highlighter on it: [source,js] -------------------------------------------------- { "type_name" : { - "content" : {"index_options" : "offsets"} + "comment" : {"index_options" : "offsets"} } } -------------------------------------------------- @@ -113,7 +113,7 @@ will be used instead of the plain highlighter. 
The fast vector highlighter: for things like phrase matches being sorted above term matches when highlighting a Boosting Query that boosts phrase matches over term matches -Here is an example of setting the `content` field to allow for +Here is an example of setting the `comment` field to allow for highlighting using the fast vector highlighter on it (this will cause the index to be bigger): @@ -121,7 +121,7 @@ the index to be bigger): -------------------------------------------------- { "type_name" : { - "content" : {"term_vector" : "with_positions_offsets"} + "comment" : {"term_vector" : "with_positions_offsets"} } } -------------------------------------------------- @@ -142,7 +142,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {"type" : "plain"} + "comment" : {"type" : "plain"} } } } @@ -163,7 +163,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {"force_source" : true} + "comment" : {"force_source" : true} } } } @@ -241,7 +241,7 @@ GET /_search "highlight" : { "tags_schema" : "styled", "fields" : { - "content" : {} + "comment" : {} } } } @@ -271,7 +271,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : {"fragment_size" : 150, "number_of_fragments" : 3} + "comment" : {"fragment_size" : 150, "number_of_fragments" : 3} } } } @@ -294,7 +294,7 @@ GET /_search "highlight" : { "order" : "score", "fields" : { - "content" : {"fragment_size" : 150, "number_of_fragments" : 3} + "comment" : {"fragment_size" : 150, "number_of_fragments" : 3} } } } @@ -317,7 +317,7 @@ GET /_search "highlight" : { "fields" : { "_all" : {}, - "bio.title" : {"number_of_fragments" : 0} + "blog.title" : {"number_of_fragments" : 0} } } } @@ -345,7 +345,7 @@ GET /_search }, "highlight" : { "fields" : { - "content" : { + "comment" : { "fragment_size" : 150, "number_of_fragments" : 3, "no_match_size": 150 @@ -375,7 +375,7 @@ GET /_search "stored_fields": [ "_id" ], "query" : { "match": { - "content": { + "comment": { "query": "foo bar" } } @@ -385,7 +385,7 @@ GET /_search "query": { "rescore_query" : { "match_phrase": { - "content": { + "comment": { "query": "foo bar", "slop": 1 } @@ -397,21 +397,21 @@ GET /_search "highlight" : { "order" : "score", "fields" : { - "content" : { + "comment" : { "fragment_size" : 150, "number_of_fragments" : 3, "highlight_query": { "bool": { "must": { "match": { - "content": { + "comment": { "query": "foo bar" } } }, "should": { "match_phrase": { - "content": { + "comment": { "query": "foo bar", "slop": 1, "boost": 10.0 @@ -452,9 +452,9 @@ GET /_search "fragment_size" : 150, "fields" : { "_all" : { "pre_tags" : [""], "post_tags" : [""] }, - "bio.title" : { "number_of_fragments" : 0 }, - "bio.author" : { "number_of_fragments" : 0 }, - "bio.content" : { "number_of_fragments" : 5, "order" : "score" } + "blog.title" : { "number_of_fragments" : 0 }, + "blog.author" : { "number_of_fragments" : 0 }, + "blog.comment" : { "number_of_fragments" : 5, "order" : "score" } } } } @@ -508,8 +508,8 @@ ways. All `matched_fields` must have `term_vector` set to combined is loaded so only that field would benefit from having `store` set to `yes`. -In the following examples `content` is analyzed by the `english` -analyzer and `content.plain` is analyzed by the `standard` analyzer. +In the following examples `comment` is analyzed by the `english` +analyzer and `comment.plain` is analyzed by the `standard` analyzer. 
[source,js] -------------------------------------------------- @@ -517,15 +517,15 @@ GET /_search { "query": { "query_string": { - "query": "content.plain:running scissors", - "fields": ["content"] + "query": "comment.plain:running scissors", + "fields": ["comment"] } }, "highlight": { "order": "score", "fields": { - "content": { - "matched_fields": ["content", "content.plain"], + "comment": { + "matched_fields": ["comment", "comment.plain"], "type" : "fvh" } } @@ -546,14 +546,14 @@ GET /_search "query": { "query_string": { "query": "running scissors", - "fields": ["content", "content.plain^10"] + "fields": ["comment", "comment.plain^10"] } }, "highlight": { "order": "score", "fields": { - "content": { - "matched_fields": ["content", "content.plain"], + "comment": { + "matched_fields": ["comment", "comment.plain"], "type" : "fvh" } } @@ -572,14 +572,14 @@ GET /_search "query": { "query_string": { "query": "running scissors", - "fields": ["content", "content.plain^10"] + "fields": ["comment", "comment.plain^10"] } }, "highlight": { "order": "score", "fields": { - "content": { - "matched_fields": ["content.plain"], + "comment": { + "matched_fields": ["comment.plain"], "type" : "fvh" } } @@ -590,7 +590,7 @@ GET /_search The above query wouldn't highlight "run" or "scissor" but shows that it is just fine not to list the field to which the matches are combined -(`content`) in the matched fields. +(`comment`) in the matched fields. [NOTE] Technically it is also fine to add fields to `matched_fields` that @@ -606,7 +606,7 @@ There is a small amount of overhead involved with setting -------------------------------------------------- "highlight": { "fields": { - "content": {} + "comment": {} } } -------------------------------------------------- @@ -615,8 +615,8 @@ to -------------------------------------------------- "highlight": { "fields": { - "content": { - "matched_fields": ["content"], + "comment": { + "matched_fields": ["comment"], "type" : "fvh" } } diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index d5237812b98..a0a2809a6e0 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -34,7 +34,7 @@ The `preference` is a query string parameter which can be set to: `_shards:2,3`:: Restricts the operation to the specified shards. (`2` and `3` in this case). This preference can be combined with other - preferences but it has to appear first: `_shards:2,3;_primary` + preferences but it has to appear first: `_shards:2,3|_primary` `_only_nodes`:: Restricts the operation to nodes specified in node specification diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index d924a56b652..82a27881720 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -38,16 +38,18 @@ should keep the ``search context'' alive (see <>), eg `?s [source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d ' +POST /twitter/tweet/_search?scroll=1m { + "size": 100, "query": { "match" : { "title" : "elasticsearch" } } } -' -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The result from the above request includes a `_scroll_id`, which should be passed to the `scroll` API in order to retrieve the next batch of @@ -55,13 +57,14 @@ results. 
[source,js] -------------------------------------------------- -curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d' +POST <1> /_search/scroll <2> { "scroll" : "1m", <3> - "scroll_id" : "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1" <4> + "scroll_id" : "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==" <4> } -' -------------------------------------------------- +// CONSOLE +// TEST[continued s/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==/$body._scroll_id/] <1> `GET` or `POST` can be used. <2> The URL should not include the `index` or `type` name -- these @@ -70,16 +73,10 @@ curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d' for another `1m`. <4> The `scroll_id` parameter -Each call to the `scroll` API returns the next batch of results until there -are no more results left to return, ie the `hits` array is empty. - -For backwards compatibility, `scroll_id` and `scroll` can be passed in the query string. -And the `scroll_id` can be passed in the request body - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/_search/scroll?scroll=1m' -d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1' --------------------------------------------------- +The `size` parameter allows you to configure the maximum number of hits to be +returned with each batch of results. Each call to the `scroll` API returns the +next batch of results until there are no more results left to return, ie the +`hits` array is empty. IMPORTANT: The initial search request and each subsequent scroll request returns a new `_scroll_id` -- only the most recent `_scroll_id` should be @@ -94,14 +91,15 @@ order, this is the most efficient option: [source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/_search?scroll=1m' -d ' +GET /_search?scroll=1m { "sort": [ "_doc" ] } -' -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [[scroll-search-context]] ==== Keeping the search context alive @@ -130,8 +128,9 @@ You can check how many search contexts are open with the [source,js] --------------------------------------- -curl -XGET localhost:9200/_nodes/stats/indices/search?pretty +GET /_nodes/stats/indices/search --------------------------------------- +// CONSOLE ==== Clear scroll API @@ -143,37 +142,46 @@ cleared as soon as the scroll is not being used anymore using the [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll -d ' +DELETE /_search/scroll { - "scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"] -}' + "scroll_id" : ["DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ=="] +} --------------------------------------- +// CONSOLE +// TEST[catch:missing] Multiple scroll IDs can be passed as array: [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll -d ' +DELETE /_search/scroll { - "scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1", "aGVuRmV0Y2g7NTsxOnkxaDZ"] -}' + "scroll_id" : [ + "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", + "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB" + ] +} --------------------------------------- +// CONSOLE +// TEST[catch:missing] All search contexts can be cleared with the `_all` parameter: [source,js] --------------------------------------- -curl -XDELETE 
localhost:9200/_search/scroll/_all +DELETE /_search/scroll/_all --------------------------------------- +// CONSOLE The `scroll_id` can also be passed as a query string parameter or in the request body. Multiple scroll IDs can be passed as comma separated values: [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll \ - -d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1,aGVuRmV0Y2g7NTsxOnkxaDZ' +DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB --------------------------------------- +// CONSOLE +// TEST[catch:missing] [[sliced-scroll]] ==== Sliced Scroll diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index d3bbe283a2d..5a7c3a51f01 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -243,8 +243,20 @@ GET /_search How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slightly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles). -Note: the geo distance sorting supports `sort_mode` options: `min`, -`max` and `avg`. +`sort_mode`:: + + What to do in case a field has several geo points. By default, the shortest + distance is taken into account when sorting in ascending order and the + longest distance when sorting in descending order. Supported values are + `min`, `max`, `median` and `avg`. + +`unit`:: + + The unit to use when computing sort values. The default is `m` (meters). + +NOTE: geo distance sorting does not support configurable missing values: the +distance will always be considered equal to +Infinity+ when a document does not +have values for the field that is used for distance computation. The following formats are supported in providing the coordinates: diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 60065ce96bb..4589b1552ec 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -202,24 +202,66 @@ The configured weight for a suggestion is returned as `_score`. The `text` field uses the `input` of your indexed suggestion. Suggestions return the full document `_source` by default. The size of the `_source` can impact performance due to disk fetch and network transport overhead. -For best performance, filter out unnecessary fields from the `_source` +To save some network overhead, filter out unnecessary fields from the `_source` using <> to minimize -`_source` size. The following demonstrates an example completion query -with source filtering: +`_source` size. 
Note that the _suggest endpoint doesn't support source +filtering but using suggest on the `_search` endpoint does: [source,js] -------------------------------------------------- -POST music/_suggest +POST music/_search?size=0 { - "_source": "completion.*", - "song-suggest" : { - "prefix" : "nir", - "completion" : { - "field" : "suggest" + "_source": "suggest", + "suggest": { + "song-suggest" : { + "prefix" : "nir", + "completion" : { + "field" : "suggest" + } } } } -------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which should look like: + +[source,js] +-------------------------------------------------- +{ + "took": 6, + "timed_out": false, + "_shards" : { + "total" : 5, + "successful" : 5, + "failed" : 0 + }, + "hits": { + "total" : 0, + "max_score" : 0.0, + "hits" : [] + }, + "suggest": { + "song-suggest" : [ { + "text" : "nir", + "offset" : 0, + "length" : 3, + "options" : [ { + "text" : "Nirvana", + "_index": "music", + "_type": "song", + "_id": "1", + "_score": 1.0, + "_source": { + "suggest": ["Nevermind", "Nirvana"] + } + } ] + } ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 6,/"took": $body.took,/] The basic completion suggester query supports the following parameters: diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index 95ce6a8ff6a..6670b9f31d5 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -64,9 +64,6 @@ query. |`analyzer` |The analyzer name to be used when analyzing the query string. -|`lowercase_expanded_terms` |Should terms be automatically lowercased or -not. Defaults to `true`. - |`analyze_wildcard` |Should wildcard and prefix queries be analyzed or not. Defaults to `false`. diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 5fb4ad9b7ce..5b015f4e578 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -52,9 +52,6 @@ query. |`lenient` |If set to true will cause format based failures (like providing text to a numeric field) to be ignored. Defaults to false. -|`lowercase_expanded_terms` |Should terms be automatically lowercased or -not. Defaults to `true`. - |`analyze_wildcard` |Should wildcard and prefix queries be analyzed or not. Defaults to `false`. |======================================================================= diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 0f88d03401c..75eeab65036 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -23,14 +23,19 @@ documented individually. [float] === Development vs. production mode -By default, Elasticsearch binds and publishes to `localhost`. This is +By default, Elasticsearch binds to `localhost` for <> +and <> communication. This is fine for downloading and playing with Elasticsearch, and everyday -development but it's useless for production systems. For a production -installation to be reachable, it must either bind or publish to an -external interface. Thus, we consider Elasticsearch to be in development -mode if it does not bind nor publish to an external interface (the -default), and is otherwise in production mode if it does bind or publish -to an external interface. +development but it's useless for production systems. 
To form a cluster,
+Elasticsearch instances must be reachable via transport communication so
+they must bind transport to an external interface. Thus, we consider an
+Elasticsearch instance to be in development mode if it does not bind
+transport to an external interface (the default), and is otherwise in
+production mode if it does bind transport to an external interface. Note
+that HTTP can be configured independently of transport via
+<> and <>;
+this can be useful for configuring a single instance to be reachable via
+HTTP for testing purposes without triggering production mode.

=== Heap size check

@@ -51,7 +56,7 @@
File descriptors are a Unix construct for tracking open "files". In Unix
though, https://en.wikipedia.org/wiki/Everything_is_a_file[everything is
a file]. For example, "files" could be a physical file, a virtual file
(e.g., `/proc/loadavg`), or network sockets. Elasticsearch requires
-lots file descriptors (e.g., every shard is composed of multiple
+lots of file descriptors (e.g., every shard is composed of multiple
segments and other files, plus connections to other nodes, etc.). This
bootstrap check is enforced on OS X and Linux. To pass the file
descriptor check, you might have to configure <> request:

@@ -41,19 +46,28 @@
POST _flush/synced

A synced flush request is a ``best effort'' operation. It will fail if there
are any pending indexing operations, but it is safe to reissue the request
multiple times if necessary.
+--

-==== Step 3: Shutdown and upgrade all nodes
+. *Shutdown and upgrade all nodes*
++
+--

Stop all Elasticsearch services on all nodes in the cluster. Each node can be
upgraded following the same procedure described in <>.
+--

-==== Step 4: Upgrade any plugins
+. *Upgrade any plugins*
++
+--

Elasticsearch plugins must be upgraded when upgrading a node. Use the
`elasticsearch-plugin` script to install the correct version of any plugins
that you need.
+--

-==== Step 5: Start the cluster
+. *Start the cluster*
++
+--

If you have dedicated master nodes -- nodes with `node.master` set to
`true` (the default) and `node.data` set to `false` -- then it is a good idea
@@ -75,8 +89,11 @@ GET _cat/nodes
// CONSOLE

Use these APIs to check that all nodes have successfully joined the cluster.
+--

-==== Step 6: Wait for yellow
+. *Wait for yellow*
++
+--

As soon as each node has joined the cluster, it will start to recover any
primary shards that are stored locally. Initially, the
@@ -87,8 +104,11 @@

Once each node has recovered its local shards, the `status` will become
`yellow`, meaning all primary shards have been recovered, but not all
replica shards are allocated. This is to be expected because allocation
is still disabled.
+--

-==== Step 7: Reenable allocation
+. *Reenable allocation*
++
+--

Delaying the allocation of replicas until all nodes have joined the cluster
allows the master to allocate replicas to nodes which already have local shard
@@ -124,3 +144,4 @@ GET _cat/recovery

Once the `status` column in the `_cat/health` output has reached `green`,
all primary and replica shards have been successfully allocated.
+--
diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc
index aecd205b613..63f988325a7 100644
--- a/docs/reference/setup/important-settings.asciidoc
+++ b/docs/reference/setup/important-settings.asciidoc
@@ -12,7 +12,6 @@ configured before going into production. 
* <> * <> * <> -* <> [float] [[path-settings]] @@ -70,7 +69,7 @@ environments, otherwise you might end up with nodes joining the wrong cluster. [[node.name]] === `node.name` -By default, Elasticsearch will take the 7 first charachter of the randomly generated uuid used as the node id. +By default, Elasticsearch will take the 7 first character of the randomly generated uuid used as the node id. Note that the node id is persisted and does not change when a node restarts and therefore the default node name will also not change. @@ -185,32 +184,3 @@ discovery.zen.minimum_master_nodes: 2 IMPORTANT: If `discovery.zen.minimum_master_nodes` is not set when Elasticsearch is running in <>, an exception will be thrown which will prevent the node from starting. - -[float] -[[node.max_local_storage_nodes]] -=== `node.max_local_storage_nodes` - -It is possible to start more than one node on the same server from the same -`$ES_HOME`, just by doing the following: - -[source,sh] --------------------------------------------------- -./bin/elasticsearch -d -./bin/elasticsearch -d --------------------------------------------------- - -This works just fine: the data directory structure is designed to let multiple -nodes coexist. However, a single instance of Elasticsearch is able to use all -of the resources of a single server and it seldom makes sense to run multiple -nodes on the same server in production. - -It is, however, possible to start more than one node on the same server by -mistake and to be completely unaware that this problem exists. To prevent more -than one node from sharing the same data directory, it is advisable to add the -following setting: - -[source,yaml] --------------------------------------------------- -node.max_local_storage_nodes: 1 --------------------------------------------------- - diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index 578dcb5db43..c1d0425b430 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -9,6 +9,10 @@ The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[deb-key]] ==== Import the Elasticsearch PGP Key @@ -22,6 +26,14 @@ wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add [[deb-repo]] ==== Installing from the APT repository +ifeval::["{release-state}"=="unreleased"] + +Version {version} of Elasticsearch has not yet been released. 
+ +endif::[] + +ifeval::["{release-state}"!="unreleased"] + You may need to install the `apt-transport-https` package on Debian before proceeding: [source,sh] @@ -31,11 +43,25 @@ sudo apt-get install apt-transport-https Save the repository definition to +/etc/apt/sources.list.d/elastic-{major-version}.list+: +ifeval::["{release-state}"=="released"] + ["source","sh",subs="attributes,callouts"] -------------------------------------------------- echo "deb https://artifacts.elastic.co/packages/{major-version}-prerelease/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-{major-version}.list -------------------------------------------------- +endif::[] + +ifeval::["{release-state}"=="prerelease"] + +["source","sh",subs="attributes,callouts"] +-------------------------------------------------- +echo "deb https://artifacts.elastic.co/packages/{major-version}/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-{major-version}.list +-------------------------------------------------- + +endif::[] + + [WARNING] ================================================== @@ -68,10 +94,20 @@ Duplicate sources.list entry https://artifacts.elastic.co/packages/{major-versio Examine +/etc/apt/sources.list.d/elasticsearch-{major-version}.list+ for the duplicate entry or locate the duplicate entry amongst the files in `/etc/apt/sources.list.d/` and the `/etc/apt/sources.list` file. ================================================== +endif::[] + [[install-deb]] ==== Download and install the Debian package manually +ifeval::["{release-state}"=="unreleased"] + +Version {version} of Elasticsearch has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + The Debian package for Elastisearch v{version} can be downloaded from the website and installed as follows: ["source","sh",subs="attributes"] @@ -83,6 +119,8 @@ sudo dpkg -i elasticsearch-{version}.deb <1> Compare the SHA produced by `sha1sum` or `shasum` with the https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.deb.sha1[published SHA]. +endif::[] + include::init-systemd.asciidoc[] [[deb-running-init]] @@ -174,6 +212,7 @@ locations for a Debian-based system: | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | /usr/share/elasticsearch/plugins + | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. diff --git a/docs/reference/setup/install/init-systemd.asciidoc b/docs/reference/setup/install/init-systemd.asciidoc index 3e252ca94cd..1532c5313ae 100644 --- a/docs/reference/setup/install/init-systemd.asciidoc +++ b/docs/reference/setup/install/init-systemd.asciidoc @@ -1,7 +1,7 @@ ==== SysV `init` vs `systemd` Elasticsearch is not started automatically after installation. How to start -and stop Elasticsearch depends on whether your sytem uses SysV `init` or +and stop Elasticsearch depends on whether your system uses SysV `init` or `systemd` (used by newer distributions). You can tell which is being used by running this command: diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index 785c0897c35..acf9d6f2418 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -13,6 +13,10 @@ The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. 
Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[rpm-key]] ==== Import the Elasticsearch PGP Key @@ -26,10 +30,20 @@ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch [[rpm-repo]] ==== Installing from the RPM repository +ifeval::["{release-state}"=="unreleased"] + +Version {version} of Elasticsearch has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + Create a file called `elasticsearch.repo` in the `/etc/yum.repos.d/` directory for RedHat based distributions, or in the `/etc/zypp/repos.d/` directory for OpenSuSE based distributions, containing: +ifeval::["{release-state}"=="released"] + ["source","sh",subs="attributes,callouts"] -------------------------------------------------- [elasticsearch-{major-version}] @@ -42,6 +56,24 @@ autorefresh=1 type=rpm-md -------------------------------------------------- +endif::[] + +ifeval::["{release-state}"=="prerelease"] + +["source","sh",subs="attributes,callouts"] +-------------------------------------------------- +[elasticsearch-{major-version}] +name=Elasticsearch repository for {major-version} packages +baseurl=https://artifacts.elastic.co/packages/{major-version}/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +-------------------------------------------------- + +endif::[] + And your repository is ready for use. You can now install Elasticsearch with one of the following commands: [source,sh] @@ -54,9 +86,19 @@ sudo zypper install elasticsearch <3> <2> Use `dnf` on Fedora and other newer Red Hat distributions. <3> Use `zypper` on OpenSUSE based distributions +endif::[] + [[install-rpm]] ==== Download and install the RPM manually +ifeval::["{release-state}"=="unreleased"] + +Version {version} of Elasticsearch has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + The RPM for Elastisearch v{version} can be downloaded from the website and installed as follows: ["source","sh",subs="attributes"] @@ -68,6 +110,8 @@ sudo rpm --install elasticsearch-{version}.rpm <1> Compare the SHA produced by `sha1sum` or `shasum` with the https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.rpm.sha1[published SHA]. +endif::[] + include::init-systemd.asciidoc[] [[rpm-running-init]] @@ -160,6 +204,7 @@ locations for an RPM-based system: | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | /usr/share/elasticsearch/plugins + | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. diff --git a/docs/reference/setup/install/sysconfig-file.asciidoc b/docs/reference/setup/install/sysconfig-file.asciidoc index 8cf3482bf33..1ab0057f01e 100644 --- a/docs/reference/setup/install/sysconfig-file.asciidoc +++ b/docs/reference/setup/install/sysconfig-file.asciidoc @@ -17,7 +17,7 @@ `MAX_LOCKED_MEMORY`:: - Maximum locked memory size. Set to `unlimited if you use the + Maximum locked memory size. Set to `unlimited` if you use the `bootstrap.memory_lock` option in elasticsearch.yml. 
`MAX_MAP_COUNT`:: diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 22e3c3b8d42..d33558b913f 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -10,20 +10,34 @@ link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[install-windows]] ==== Download and install the `.zip` package +ifeval::["{release-state}"=="unreleased"] + +Version {version} of Elasticsearch has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip Unzip it with your favourite unzip tool. This will create a folder called +elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal -window, `CD` to the `%ES_HOME%` directory, for instance: +window, `cd` to the `%ES_HOME%` directory, for instance: ["source","sh",subs="attributes"] ---------------------------- -CD c:\elasticsearch-{version} +cd c:\elasticsearch-{version} ---------------------------- +endif::[] + [[windows-running]] ==== Running Elasticsearch from the command line @@ -71,7 +85,7 @@ stop the service, all from the command-line. ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -c:\elasticsearch-{version}{backslash}bin>service +c:\elasticsearch-{version}{backslash}bin>elasticsearch-service Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] -------------------------------------------------- @@ -99,7 +113,7 @@ information is made available during install: ["source","sh",subs="attributes"] -------------------------------------------------- -c:\elasticsearch-{version}{backslash}bin>service install +c:\elasticsearch-{version}{backslash}bin>elasticsearch-service install Installing service : "elasticsearch-service-x64" Using JAVA_HOME (64-bit): "c:\jvm\jdk1.8" The service 'elasticsearch-service-x64' has been installed. @@ -234,6 +248,7 @@ directory so that you do not delete important data later on. | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | %ES_HOME%\plugins + | | repo | Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here. diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index efbfc50f7a1..57c5f220e54 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -10,10 +10,23 @@ link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. 
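+
+As a quick sanity check (a minimal sketch; the exact output depends on your
+Java distribution), you can verify which Java version is on your path before
+installing:
+
+[source,sh]
+--------------------------------------------
+java -version
+echo $JAVA_HOME
+--------------------------------------------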
+
[[install-zip]]
==== Download and install the `.zip` package

-The `.zip` archive for Elastisearch v{version} can be downloaded and installed as follows:
+ifeval::["{release-state}"=="unreleased"]
+
+Version {version} of Elasticsearch has not yet been released.
+
+endif::[]
+
+ifeval::["{release-state}"!="unreleased"]
+
+The `.zip` archive for Elasticsearch v{version} can be downloaded and installed as follows:
+
["source","sh",subs="attributes"]
--------------------------------------------
@@ -26,10 +39,21 @@ cd elasticsearch-{version}/ <2>
    https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip.sha1[published SHA].
<2> This directory is known as `$ES_HOME`.

+endif::[]
+
+
[[install-targz]]
==== Download and install the `.tar.gz` package

-The `.tar.gz` archive for Elastisearch v{version} can be downloaded and installed as follows:
+ifeval::["{release-state}"=="unreleased"]
+
+Version {version} of Elasticsearch has not yet been released.
+
+endif::[]
+
+ifeval::["{release-state}"!="unreleased"]
+
+The `.tar.gz` archive for Elasticsearch v{version} can be downloaded and installed as follows:

["source","sh",subs="attributes"]
--------------------------------------------
@@ -42,6 +66,8 @@ cd elasticsearch-{version}/ <2>
    https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz.sha1[published SHA].
<2> This directory is known as `$ES_HOME`.

+endif::[]
+
[[zip-targz-running]]
==== Running Elasticsearch from the command line

@@ -149,6 +175,7 @@ directory so that you do not delete important data later on.
| plugins
| Plugin files location. Each plugin will be contained in a subdirectory.
| $ES_HOME/plugins
+ |

| repo
| Shared file system repository locations. Can hold multiple locations. A file system repository can be placed in to any subdirectory of any directory specified here.

@@ -163,4 +190,4 @@ directory so that you do not delete important data later on.

|=======================================================================

-include::next-steps.asciidoc[]
\ No newline at end of file
+include::next-steps.asciidoc[]

diff --git a/docs/reference/setup/reindex_upgrade.asciidoc b/docs/reference/setup/reindex_upgrade.asciidoc
new file mode 100644
index 00000000000..f9e7a60ee5b
--- /dev/null
+++ b/docs/reference/setup/reindex_upgrade.asciidoc
@@ -0,0 +1,106 @@
+[[reindex-upgrade]]
+=== Reindex to upgrade
+
+Elasticsearch is able to use indices created in the previous major version
+only. For instance, Elasticsearch 6.x can use indices created in
+Elasticsearch 5.x, but not those created in Elasticsearch 2.x or before.
+
+NOTE: Elasticsearch 6.x nodes will fail to start in the presence of indices
+that are too old.
+
+If you are running an Elasticsearch 5.x cluster which contains indices that
+were created before 5.x, you will either need to delete those old indices or
+to reindex them before upgrading to 6.x. See <<reindex-upgrade-inplace>>.
+
+If you are running an Elasticsearch 2.x cluster or older, you have two options:
+
+* First upgrade to Elasticsearch 5.x, reindex the old indices, then upgrade
+  to 6.x. See <<reindex-upgrade-inplace>>.
+
+* Create a new 6.x cluster and use reindex-from-remote to import indices
+  directly from the 2.x cluster. See <<reindex-upgrade-remote>>.
+
+.Time-based indices and retention periods
+*******************************************
+
+For many use cases with time-based indices, you will not need to worry about
+carrying old 2.x indices with you to 6.x. Data in time-based indices usually
+becomes less interesting as time passes.
+Old indices can be deleted once they
+fall outside of your retention period.
+
+Users in this position can continue to use 5.x until all old 2.x indices have
+been deleted, then upgrade to 6.x directly.
+
+*******************************************
+
+
+[[reindex-upgrade-inplace]]
+==== Reindex in place
+
+If you are running a 5.x cluster which contains indices created in
+Elasticsearch 2.x, you will need to reindex (or delete) those indices before
+upgrading to Elasticsearch 6.x.
+
+The reindex process works as follows:
+
+* Create a new index, copying the mappings and settings from the old index.
+  Set the `refresh_interval` to `-1` and the `number_of_replicas` to `0` for
+  efficient reindexing.
+
+* Reindex all documents from the old index to the new index using the
+  <<docs-reindex,reindex API>>.
+
+* Reset the `refresh_interval` and `number_of_replicas` to the values
+  used in the old index, and wait for the index to become green.
+
+* In a single <<indices-aliases,update index aliases>> request:
+
+  * Delete the old index.
+  * Add an alias with the old index name to the new index.
+  * Add any aliases that existed on the old index to the new index.
+
+At the end of this process, you will have a new 5.x index which can be used
+by an Elasticsearch 6.x cluster.
+
+[[reindex-upgrade-remote]]
+==== Upgrading with reindex-from-remote
+
+If you are running a 1.x or 2.x cluster and would like to migrate directly to 6.x
+without first migrating to 5.x, you can do so using
+<<reindex-from-remote,reindex from remote>>.
+
+[WARNING]
+=============================================
+
+Elasticsearch includes backwards compatibility code that allows indices from
+the previous major version to be upgraded to the current major version. By
+moving directly from Elasticsearch 2.x or before to 6.x, you will have to solve any
+backwards compatibility issues yourself.
+
+=============================================
+
+You will need to set up a 6.x cluster alongside your existing old cluster.
+The 6.x cluster needs to have access to the REST API of the old cluster.
+
+For each old index that you want to transfer to the 6.x cluster, you will need
+to:
+
+* Create a new index in 6.x with the appropriate mappings and settings. Set
+  the `refresh_interval` to `-1` and set `number_of_replicas` to `0` for
+  faster reindexing.
+
+* Use <<reindex-from-remote,reindex from remote>> to pull documents from the
+  old index into the new 6.x index.
+
+* If you run the reindex job in the background (with `wait_for_completion` set
+  to `false`), the reindex request will return a `task_id` which can be used to
+  monitor progress of the reindex job in the <<tasks,task API>>:
+  `GET _tasks/TASK_ID`.
+
+* Once reindex has completed, set the `refresh_interval` and
+  `number_of_replicas` to the desired values (the defaults are `30s` and `1`
+  respectively).
+
+* Once the new index has finished replication, you can delete the old index.
+
+The 6.x cluster can start out small, and you can gradually move nodes from the
+old cluster to the 6.x cluster as you migrate indices across.
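The reindex-in-place steps above map directly onto REST calls. The following is a minimal sketch, not part of the patch itself: `old_index` and `new_index` are placeholder names, the mappings are omitted for brevity (copy them from the old index in practice), and the reset values assume the old index used the defaults.

[source,js]
--------------------------------------------------
PUT new_index
{
  "settings": {
    "refresh_interval": "-1",
    "number_of_replicas": 0
  }
}

POST _reindex
{
  "source": { "index": "old_index" },
  "dest":   { "index": "new_index" }
}

PUT new_index/_settings
{
  "refresh_interval": "1s",
  "number_of_replicas": 1
}

POST _aliases
{
  "actions": [
    { "remove_index": { "index": "old_index" } },
    { "add": { "index": "new_index", "alias": "old_index" } }
  ]
}
--------------------------------------------------

The final `_aliases` request performs the swap in a single cluster state update: `remove_index` deletes the old index and `add` makes the new index answer to the old name, so clients never see a window where the name resolves to nothing.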
diff --git a/docs/reference/setup/rolling_upgrade.asciidoc b/docs/reference/setup/rolling_upgrade.asciidoc
index 5ba578ce768..981f2c240d3 100644
--- a/docs/reference/setup/rolling_upgrade.asciidoc
+++ b/docs/reference/setup/rolling_upgrade.asciidoc
@@ -12,7 +12,9 @@ supported for your version of Elasticsearch.

To perform a rolling upgrade:

-==== Step 1: Disable shard allocation
+. *Disable shard allocation*
++
+--

When you shut down a node, the allocation process will wait for one minute
before starting to replicate the shards that were on that node to other nodes
@@ -30,8 +32,11 @@ PUT _cluster/settings
--------------------------------------------------
// CONSOLE
// TEST[skip:indexes don't assign]
+--

-==== Step 2: Stop non-essential indexing and perform a synced flush (Optional)
+. *Stop non-essential indexing and perform a synced flush (Optional)*
++
+--

You may happily continue indexing during the upgrade. However, shard recovery
will be much faster if you temporarily stop non-essential indexing and issue a
@@ -46,9 +51,11 @@ POST _flush/synced
A synced flush request is a ``best effort'' operation. It will fail if there
are any pending indexing operations, but it is safe to reissue the request
multiple times if necessary.
+--

-[[upgrade-node]]
-==== Step 3: Stop and upgrade a single node
+. [[upgrade-node]] *Stop and upgrade a single node*
++
+--

Shut down one of the nodes in the cluster *before* starting the upgrade.

@@ -87,14 +94,20 @@ To upgrade using a zip or compressed tarball:
* Either copy the files in the `data` directory from your old installation
  to your new installation, or configure the location of the data directory
  in the `config/elasticsearch.yml` file, with the `path.data` setting.
+--

-==== Step 4: Upgrade any plugins
+. *Upgrade any plugins*
++
+--

Elasticsearch plugins must be upgraded when upgrading a node. Use the
`elasticsearch-plugin` script to install the correct version of any plugins
that you need.
+--

-==== Step 5: Start the upgraded node
+. *Start the upgraded node*
++
+--

Start the now upgraded node and confirm that it joins the cluster by checking
the log file or by checking the output of this request:

@@ -104,8 +117,11 @@ the log file or by checking the output of this request:
GET _cat/nodes
--------------------------------------------------
// CONSOLE
+--

-==== Step 6: Reenable shard allocation
+. *Reenable shard allocation*
++
+--

Once the node has joined the cluster, reenable shard allocation to start using
the node:

@@ -120,8 +136,11 @@ PUT _cluster/settings
}
--------------------------------------------------
// CONSOLE
+--

-==== Step 7: Wait for the node to recover
+. *Wait for the node to recover*
++
+--

You should wait for the cluster to finish shard allocation before upgrading
the next node. You can check on progress with the <<cat-health,`_cat/health`>>
@@ -168,8 +187,12 @@ GET _cat/recovery

If you stopped indexing, then it is safe to resume indexing as soon as
recovery has completed.
+--

-==== Step 8: Repeat
+. *Repeat*
++
+--

When the cluster is stable and the node has recovered, repeat the above steps
for all remaining nodes.
+--
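The "reenable allocation" and "wait for recovery" steps lend themselves to a small wait loop against the cat APIs. A sketch, not part of the patch above; the host, port, and 10-second poll interval are arbitrary choices:

["source","sh"]
--------------------------------------------------
# Poll cluster health until all shards are allocated,
# then list any recoveries that are still in flight.
while [ "$(curl -s 'localhost:9200/_cat/health?h=status' | tr -d '[:space:]')" != "green" ]; do
  sleep 10
done
curl -s 'localhost:9200/_cat/recovery'
--------------------------------------------------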
diff --git a/docs/reference/setup/sysconfig/file-descriptors.asciidoc b/docs/reference/setup/sysconfig/file-descriptors.asciidoc
index 25a2214146c..f4bc95749ae 100644
--- a/docs/reference/setup/sysconfig/file-descriptors.asciidoc
+++ b/docs/reference/setup/sysconfig/file-descriptors.asciidoc
@@ -1,6 +1,12 @@
[[file-descriptors]]
=== File Descriptors

+[NOTE]
+This is only relevant for Linux and macOS and can be safely ignored if running
+Elasticsearch on Windows. On Windows the JVM uses an
+https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx[API]
+limited only by available resources.
+
Elasticsearch uses a lot of file descriptors or file handles. Running out of
file descriptors can be disastrous and will most probably lead to data loss.
Make sure to increase the limit on the number of open file descriptors for

diff --git a/docs/reference/setup/sysconfig/heap_size.asciidoc b/docs/reference/setup/sysconfig/heap_size.asciidoc
index f54ca7813a2..55fb95bc7e6 100644
--- a/docs/reference/setup/sysconfig/heap_size.asciidoc
+++ b/docs/reference/setup/sysconfig/heap_size.asciidoc
@@ -67,7 +67,7 @@ ES_JAVA_OPTS="-Xms4000m -Xmx4000m" ./bin/elasticsearch <2>
<2> Set the minimum and maximum heap size to 4000 MB.

NOTE: Configuring the heap for the <<windows-service,Windows service>>
-is different than the above. The values initiallly populated for the
+is different than the above. The values initially populated for the
Windows service can be configured as above but are different after the
service has been installed. Consult the <<windows-service,Windows service documentation>> for additional

diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc
index 03bce8a91fb..d5c649e9992 100644
--- a/docs/reference/setup/upgrade.asciidoc
+++ b/docs/reference/setup/upgrade.asciidoc
@@ -1,5 +1,5 @@
[[setup-upgrade]]
-== Upgrading
+== Upgrading Elasticsearch

[IMPORTANT]
===========================================
@@ -22,14 +22,34 @@ consult this table:

[cols="1<m,1<m,3",options="header",]
|=======================================================================
|Upgrade From |Upgrade To |Supported Upgrade Type
-|2.x |2.y |<<rolling-upgrades,Rolling upgrade>> (where `y > x`)
+|< 5.x |6.x |<<reindex-upgrade,Reindex to upgrade>>
|5.x |5.y |<<rolling-upgrades,Rolling upgrade>> (where `y > x`)
-|2.x |5.x |<<restart-upgrade,Full cluster restart>>
-|5.0.0-alpha1 |5.y |<<restart-upgrade,Full cluster restart>>
-|5.0.0-alpha2 |5.y |<<restart-upgrade,Full cluster restart>>
-|5.0.0-beta1 |5.y |<<restart-upgrade,Full cluster restart>>
+|5.x |6.x |<<restart-upgrade,Full cluster restart>>
+|6.0.0 pre GA |6.x |<<restart-upgrade,Full cluster restart>>
+|6.x |6.y |<<rolling-upgrades,Rolling upgrade>> (where `y > x`)
|=======================================================================

+[IMPORTANT]
+.Indices created in Elasticsearch 2.x or before
+===============================================
+
+Elasticsearch is able to read indices created in the *previous major version
+only*. For instance, Elasticsearch 6.x can use indices created in
+Elasticsearch 5.x, but not those created in Elasticsearch 2.x or before.
+
+This condition also applies to indices backed up with
+<<modules-snapshots,snapshot and restore>>. If an index was originally
+created in 2.x, it cannot be restored into a 6.x cluster even if the
+snapshot was made by a 5.x cluster.
+
+Elasticsearch 6.x nodes will fail to start in the presence of indices that
+are too old.
+
+See <<reindex-upgrade>> for more information about how to upgrade old indices.
+===============================================
+
+
include::rolling_upgrade.asciidoc[]

include::cluster_restart.asciidoc[]
+
+include::reindex_upgrade.asciidoc[]
\ No newline at end of file

diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc
index 0bf99b2fafa..94aa6d4b42b 100644
--- a/docs/reference/testing/testing-framework.asciidoc
+++ b/docs/reference/testing/testing-framework.asciidoc
@@ -30,11 +30,10 @@ First, you need to include the testing dependency in your project, along with th
   <scope>test</scope>
</dependency>
<dependency>
-   <groupId>org.elasticsearch</groupId>
-   <artifactId>elasticsearch</artifactId>
+   <groupId>org.elasticsearch.test</groupId>
+   <artifactId>framework</artifactId>
   <version>${elasticsearch.version}</version>
   <scope>test</scope>
-   <type>test-jar</type>
</dependency>
--------------------------------------------------

@@ -253,7 +252,7 @@ Usually, you would combine assertions and matchers in your test like this

[source,java]
----------------------------
-SearchResponse seearchResponse = client().prepareSearch() ...;
+SearchResponse searchResponse = client().prepareSearch() ...;
assertHitCount(searchResponse, 4);
assertFirstHit(searchResponse, hasId("4"));
assertSearchHits(searchResponse, "1", "2", "3", "4");
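With the `framework` dependency from the first hunk on the classpath, the assertions shown in the second hunk fit into a complete test roughly like the following sketch. The class, index name, and document are invented for illustration; `ESIntegTestCase` and the `ElasticsearchAssertions` static helpers come from the test framework artifact.

[source,java]
----------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;

public class SimpleSearchIT extends ESIntegTestCase {

    public void testSingleDocumentIsFound() throws Exception {
        // Index a single document and make it visible to search.
        client().prepareIndex("test", "doc", "1")
                .setSource("field", "value")
                .get();
        refresh();

        // Search the whole index and assert on the returned hits.
        SearchResponse searchResponse = client().prepareSearch("test").get();
        assertHitCount(searchResponse, 1);
        assertSearchHits(searchResponse, "1");
    }
}
----------------------------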
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc
index 47ca68e00f5..0ded9530e0e 100644
--- a/docs/resiliency/index.asciidoc
+++ b/docs/resiliency/index.asciidoc
@@ -153,10 +153,10 @@ The new tests are run continuously in our testing farm and are passing.
We are verifying that no failures are found.

-== Unreleased
+== Completed

[float]
-=== Port Jepsen tests dealing with loss of acknowledged writes to our testing framework (STATUS: UNRELEASED, V5.0.0)
+=== Port Jepsen tests dealing with loss of acknowledged writes to our testing framework (STATUS: DONE, V5.0.0)

We have increased our test coverage to include scenarios tested by Jepsen that
demonstrate loss of acknowledged writes, as described in the Elasticsearch related
blogs. We make heavy use of randomization to expand on the scenarios that can be
tested and to introduce
@@ -167,7 +167,7 @@ where the `testAckedIndexing` test was specifically added to check that we don't

[float]
-=== Loss of documents during network partition (STATUS: UNRELEASED, v5.0.0)
+=== Loss of documents during network partition (STATUS: DONE, v5.0.0)

If a network partition separates a node from the master, there is some window of time before the node detects it. The length of the window
is dependent on the type of the partition. This window is extremely small if a socket is broken. More adversarial partitions, for example,
silently dropping requests without breaking the socket can take longer (up to 3x30s using current defaults).

@@ -175,7 +175,7 @@ If the node hosts a primary shard at the moment of partition, and ends up being
To prevent this situation, the primary needs to wait for the master to acknowledge replica shard failures before acknowledging the write to the client. {GIT}14252[#14252]

[float]
-=== Safe primary relocations (STATUS: UNRELEASED, v5.0.0)
+=== Safe primary relocations (STATUS: DONE, v5.0.0)

When primary relocation completes, a cluster state is propagated that deactivates the old primary and marks the new primary as active.
As cluster state changes are not applied synchronously on all nodes, there can be a time interval where the relocation target has processed the
@@ -189,7 +189,7 @@ on the relocation target, each of the nodes believes the other to be the active
chasing the primary being quickly sent back and forth between the nodes, potentially making them both go OOM. {GIT}12573[#12573]

[float]
-=== Do not allow stale shards to automatically be promoted to primary (STATUS: UNRELEASED, v5.0.0)
+=== Do not allow stale shards to automatically be promoted to primary (STATUS: DONE, v5.0.0)

In some scenarios, after the loss of all valid copies, a stale replica shard can be automatically assigned as a primary, preferring old
data to no data at all ({GIT}14671[#14671]).
This can lead to a loss of acknowledged writes if the valid copies are not lost but are rather
@@ -199,7 +199,7 @@ for one of the good shard copies to reappear. In case where all good copies are
stale shard copy.

[float]
-=== Make index creation resilient to index closing and full cluster crashes (STATUS: UNRELEASED, v5.0.0)
+=== Make index creation resilient to index closing and full cluster crashes (STATUS: DONE, v5.0.0)

Recovering an index requires a quorum (with an exception for 2) of shard copies to be available to allocate a primary.
This means that a primary cannot be assigned if the cluster dies before enough shards have been allocated ({GIT}9126[#9126]). The same happens if an index
@@ -211,7 +211,7 @@ shard will be allocated upon reopening the index.
[float]
-=== Use two phase commit for Cluster State publishing (STATUS: UNRELEASED, v5.0.0)
+=== Use two phase commit for Cluster State publishing (STATUS: DONE, v5.0.0)

A master node in Elasticsearch continuously
https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#fault-detection[monitors the cluster nodes]
and removes any node from the cluster that doesn't respond to its pings in a timely
@@ -225,8 +225,6 @@ a new phase to cluster state publishing where the proposed cluster state is sent
but is not yet committed. Only once enough nodes (`discovery.zen.minimum_master_nodes`)
actively acknowledge the change, it is committed and commit messages are sent to the nodes. See {GIT}13062[#13062].

-== Completed
-
[float]
=== Wait on incoming joins before electing local node as master (STATUS: DONE, v2.0.0)

diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java
index 688eefc98d5..87ca5acf1ca 100644
--- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java
+++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java
@@ -37,7 +37,7 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
    }

    @Override

diff --git a/docs/src/test/resources/accounts.json b/docs/src/test/resources/accounts.json
new file mode 100644
index 00000000000..28b2b82c3f5
--- /dev/null
+++ b/docs/src/test/resources/accounts.json
@@ -0,0 +1,2000 @@
+{"index":{"_id":"1"}}
+{"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"}
+{"index":{"_id":"6"}}
+{"account_number":6,"balance":5686,"firstname":"Hattie","lastname":"Bond","age":36,"gender":"M","address":"671 Bristol Street","employer":"Netagy","email":"hattiebond@netagy.com","city":"Dante","state":"TN"}
+{"index":{"_id":"13"}}
+{"account_number":13,"balance":32838,"firstname":"Nanette","lastname":"Bates","age":28,"gender":"F","address":"789 Madison Street","employer":"Quility","email":"nanettebates@quility.com","city":"Nogal","state":"VA"}
+{"index":{"_id":"18"}}
+{"account_number":18,"balance":4180,"firstname":"Dale","lastname":"Adams","age":33,"gender":"M","address":"467 Hutchinson Court","employer":"Boink","email":"daleadams@boink.com","city":"Orick","state":"MD"}
+{"index":{"_id":"20"}}
+{"account_number":20,"balance":16418,"firstname":"Elinor","lastname":"Ratliff","age":36,"gender":"M","address":"282 Kings Place","employer":"Scentric","email":"elinorratliff@scentric.com","city":"Ribera","state":"WA"}
+{"index":{"_id":"25"}}
+{"account_number":25,"balance":40540,"firstname":"Virginia","lastname":"Ayala","age":39,"gender":"F","address":"171 Putnam Avenue","employer":"Filodyne","email":"virginiaayala@filodyne.com","city":"Nicholson","state":"PA"}
+{"index":{"_id":"32"}}
+{"account_number":32,"balance":48086,"firstname":"Dillard","lastname":"Mcpherson","age":34,"gender":"F","address":"702 Quentin Street","employer":"Quailcom","email":"dillardmcpherson@quailcom.com","city":"Veguita","state":"IN"}
+{"index":{"_id":"37"}}
+{"account_number":37,"balance":18612,"firstname":"Mcgee","lastname":"Mooney","age":39,"gender":"M","address":"826 Fillmore Place","employer":"Reversus","email":"mcgeemooney@reversus.com","city":"Tooleville","state":"OK"} +{"index":{"_id":"44"}} +{"account_number":44,"balance":34487,"firstname":"Aurelia","lastname":"Harding","age":37,"gender":"M","address":"502 Baycliff Terrace","employer":"Orbalix","email":"aureliaharding@orbalix.com","city":"Yardville","state":"DE"} +{"index":{"_id":"49"}} +{"account_number":49,"balance":29104,"firstname":"Fulton","lastname":"Holt","age":23,"gender":"F","address":"451 Humboldt Street","employer":"Anocha","email":"fultonholt@anocha.com","city":"Sunriver","state":"RI"} +{"index":{"_id":"51"}} +{"account_number":51,"balance":14097,"firstname":"Burton","lastname":"Meyers","age":31,"gender":"F","address":"334 River Street","employer":"Bezal","email":"burtonmeyers@bezal.com","city":"Jacksonburg","state":"MO"} +{"index":{"_id":"56"}} +{"account_number":56,"balance":14992,"firstname":"Josie","lastname":"Nelson","age":32,"gender":"M","address":"857 Tabor Court","employer":"Emtrac","email":"josienelson@emtrac.com","city":"Sunnyside","state":"UT"} +{"index":{"_id":"63"}} +{"account_number":63,"balance":6077,"firstname":"Hughes","lastname":"Owens","age":30,"gender":"F","address":"510 Sedgwick Street","employer":"Valpreal","email":"hughesowens@valpreal.com","city":"Guilford","state":"KS"} +{"index":{"_id":"68"}} +{"account_number":68,"balance":44214,"firstname":"Hall","lastname":"Key","age":25,"gender":"F","address":"927 Bay Parkway","employer":"Eventex","email":"hallkey@eventex.com","city":"Shawmut","state":"CA"} +{"index":{"_id":"70"}} +{"account_number":70,"balance":38172,"firstname":"Deidre","lastname":"Thompson","age":33,"gender":"F","address":"685 School Lane","employer":"Netplode","email":"deidrethompson@netplode.com","city":"Chestnut","state":"GA"} +{"index":{"_id":"75"}} +{"account_number":75,"balance":40500,"firstname":"Sandoval","lastname":"Kramer","age":22,"gender":"F","address":"166 Irvington Place","employer":"Overfork","email":"sandovalkramer@overfork.com","city":"Limestone","state":"NH"} +{"index":{"_id":"82"}} +{"account_number":82,"balance":41412,"firstname":"Concetta","lastname":"Barnes","age":39,"gender":"F","address":"195 Bayview Place","employer":"Fitcore","email":"concettabarnes@fitcore.com","city":"Summerfield","state":"NC"} +{"index":{"_id":"87"}} +{"account_number":87,"balance":1133,"firstname":"Hewitt","lastname":"Kidd","age":22,"gender":"M","address":"446 Halleck Street","employer":"Isologics","email":"hewittkidd@isologics.com","city":"Coalmont","state":"ME"} +{"index":{"_id":"94"}} +{"account_number":94,"balance":41060,"firstname":"Brittany","lastname":"Cabrera","age":30,"gender":"F","address":"183 Kathleen Court","employer":"Mixers","email":"brittanycabrera@mixers.com","city":"Cornucopia","state":"AZ"} +{"index":{"_id":"99"}} +{"account_number":99,"balance":47159,"firstname":"Ratliff","lastname":"Heath","age":39,"gender":"F","address":"806 Rockwell Place","employer":"Zappix","email":"ratliffheath@zappix.com","city":"Shaft","state":"ND"} +{"index":{"_id":"102"}} +{"account_number":102,"balance":29712,"firstname":"Dena","lastname":"Olson","age":27,"gender":"F","address":"759 Newkirk Avenue","employer":"Hinway","email":"denaolson@hinway.com","city":"Choctaw","state":"NJ"} +{"index":{"_id":"107"}} +{"account_number":107,"balance":48844,"firstname":"Randi","lastname":"Rich","age":28,"gender":"M","address":"694 Jefferson 
Street","employer":"Netplax","email":"randirich@netplax.com","city":"Bellfountain","state":"SC"} +{"index":{"_id":"114"}} +{"account_number":114,"balance":43045,"firstname":"Josephine","lastname":"Joseph","age":31,"gender":"F","address":"451 Oriental Court","employer":"Turnabout","email":"josephinejoseph@turnabout.com","city":"Sedley","state":"AL"} +{"index":{"_id":"119"}} +{"account_number":119,"balance":49222,"firstname":"Laverne","lastname":"Johnson","age":28,"gender":"F","address":"302 Howard Place","employer":"Senmei","email":"lavernejohnson@senmei.com","city":"Herlong","state":"DC"} +{"index":{"_id":"121"}} +{"account_number":121,"balance":19594,"firstname":"Acevedo","lastname":"Dorsey","age":32,"gender":"M","address":"479 Nova Court","employer":"Netropic","email":"acevedodorsey@netropic.com","city":"Islandia","state":"CT"} +{"index":{"_id":"126"}} +{"account_number":126,"balance":3607,"firstname":"Effie","lastname":"Gates","age":39,"gender":"F","address":"620 National Drive","employer":"Digitalus","email":"effiegates@digitalus.com","city":"Blodgett","state":"MD"} +{"index":{"_id":"133"}} +{"account_number":133,"balance":26135,"firstname":"Deena","lastname":"Richmond","age":36,"gender":"F","address":"646 Underhill Avenue","employer":"Sunclipse","email":"deenarichmond@sunclipse.com","city":"Austinburg","state":"SC"} +{"index":{"_id":"138"}} +{"account_number":138,"balance":9006,"firstname":"Daniel","lastname":"Arnold","age":39,"gender":"F","address":"422 Malbone Street","employer":"Ecstasia","email":"danielarnold@ecstasia.com","city":"Gardiner","state":"MO"} +{"index":{"_id":"140"}} +{"account_number":140,"balance":26696,"firstname":"Cotton","lastname":"Christensen","age":32,"gender":"M","address":"878 Schermerhorn Street","employer":"Prowaste","email":"cottonchristensen@prowaste.com","city":"Mayfair","state":"LA"} +{"index":{"_id":"145"}} +{"account_number":145,"balance":47406,"firstname":"Rowena","lastname":"Wilkinson","age":32,"gender":"M","address":"891 Elton Street","employer":"Asimiline","email":"rowenawilkinson@asimiline.com","city":"Ripley","state":"NH"} +{"index":{"_id":"152"}} +{"account_number":152,"balance":8088,"firstname":"Wolfe","lastname":"Rocha","age":21,"gender":"M","address":"457 Guernsey Street","employer":"Hivedom","email":"wolferocha@hivedom.com","city":"Adelino","state":"MS"} +{"index":{"_id":"157"}} +{"account_number":157,"balance":39868,"firstname":"Claudia","lastname":"Terry","age":20,"gender":"F","address":"132 Gunnison Court","employer":"Lumbrex","email":"claudiaterry@lumbrex.com","city":"Castleton","state":"MD"} +{"index":{"_id":"164"}} +{"account_number":164,"balance":9101,"firstname":"Cummings","lastname":"Little","age":26,"gender":"F","address":"308 Schaefer Street","employer":"Comtrak","email":"cummingslittle@comtrak.com","city":"Chaparrito","state":"WI"} +{"index":{"_id":"169"}} +{"account_number":169,"balance":45953,"firstname":"Hollie","lastname":"Osborn","age":34,"gender":"M","address":"671 Seaview Court","employer":"Musaphics","email":"hollieosborn@musaphics.com","city":"Hanover","state":"GA"} +{"index":{"_id":"171"}} +{"account_number":171,"balance":7091,"firstname":"Nelda","lastname":"Hopper","age":39,"gender":"M","address":"742 Prospect Place","employer":"Equicom","email":"neldahopper@equicom.com","city":"Finderne","state":"SC"} +{"index":{"_id":"176"}} +{"account_number":176,"balance":18607,"firstname":"Kemp","lastname":"Walters","age":28,"gender":"F","address":"906 Howard 
Avenue","employer":"Eyewax","email":"kempwalters@eyewax.com","city":"Why","state":"KY"} +{"index":{"_id":"183"}} +{"account_number":183,"balance":14223,"firstname":"Hudson","lastname":"English","age":26,"gender":"F","address":"823 Herkimer Place","employer":"Xinware","email":"hudsonenglish@xinware.com","city":"Robbins","state":"ND"} +{"index":{"_id":"188"}} +{"account_number":188,"balance":41504,"firstname":"Tia","lastname":"Miranda","age":24,"gender":"F","address":"583 Ainslie Street","employer":"Jasper","email":"tiamiranda@jasper.com","city":"Summerset","state":"UT"} +{"index":{"_id":"190"}} +{"account_number":190,"balance":3150,"firstname":"Blake","lastname":"Davidson","age":30,"gender":"F","address":"636 Diamond Street","employer":"Quantasis","email":"blakedavidson@quantasis.com","city":"Crumpler","state":"KY"} +{"index":{"_id":"195"}} +{"account_number":195,"balance":5025,"firstname":"Kaye","lastname":"Gibson","age":31,"gender":"M","address":"955 Hopkins Street","employer":"Zork","email":"kayegibson@zork.com","city":"Ola","state":"WY"} +{"index":{"_id":"203"}} +{"account_number":203,"balance":21890,"firstname":"Eve","lastname":"Wyatt","age":33,"gender":"M","address":"435 Furman Street","employer":"Assitia","email":"evewyatt@assitia.com","city":"Jamestown","state":"MN"} +{"index":{"_id":"208"}} +{"account_number":208,"balance":40760,"firstname":"Garcia","lastname":"Hess","age":26,"gender":"F","address":"810 Nostrand Avenue","employer":"Quiltigen","email":"garciahess@quiltigen.com","city":"Brooktrails","state":"GA"} +{"index":{"_id":"210"}} +{"account_number":210,"balance":33946,"firstname":"Cherry","lastname":"Carey","age":24,"gender":"M","address":"539 Tiffany Place","employer":"Martgo","email":"cherrycarey@martgo.com","city":"Fairacres","state":"AK"} +{"index":{"_id":"215"}} +{"account_number":215,"balance":37427,"firstname":"Copeland","lastname":"Solomon","age":20,"gender":"M","address":"741 McDonald Avenue","employer":"Recognia","email":"copelandsolomon@recognia.com","city":"Edmund","state":"ME"} +{"index":{"_id":"222"}} +{"account_number":222,"balance":14764,"firstname":"Rachelle","lastname":"Rice","age":36,"gender":"M","address":"333 Narrows Avenue","employer":"Enaut","email":"rachellerice@enaut.com","city":"Wright","state":"AZ"} +{"index":{"_id":"227"}} +{"account_number":227,"balance":19780,"firstname":"Coleman","lastname":"Berg","age":22,"gender":"M","address":"776 Little Street","employer":"Exoteric","email":"colemanberg@exoteric.com","city":"Eagleville","state":"WV"} +{"index":{"_id":"234"}} +{"account_number":234,"balance":44207,"firstname":"Betty","lastname":"Hall","age":37,"gender":"F","address":"709 Garfield Place","employer":"Miraclis","email":"bettyhall@miraclis.com","city":"Bendon","state":"NY"} +{"index":{"_id":"239"}} +{"account_number":239,"balance":25719,"firstname":"Chang","lastname":"Boyer","age":36,"gender":"M","address":"895 Brigham Street","employer":"Qaboos","email":"changboyer@qaboos.com","city":"Belgreen","state":"NH"} +{"index":{"_id":"241"}} +{"account_number":241,"balance":25379,"firstname":"Schroeder","lastname":"Harrington","age":26,"gender":"M","address":"610 Tapscott Avenue","employer":"Otherway","email":"schroederharrington@otherway.com","city":"Ebro","state":"TX"} +{"index":{"_id":"246"}} +{"account_number":246,"balance":28405,"firstname":"Katheryn","lastname":"Foster","age":21,"gender":"F","address":"259 Kane Street","employer":"Quantalia","email":"katherynfoster@quantalia.com","city":"Bath","state":"TX"} +{"index":{"_id":"253"}} 
+{"account_number":253,"balance":20240,"firstname":"Melissa","lastname":"Gould","age":31,"gender":"M","address":"440 Fuller Place","employer":"Buzzopia","email":"melissagould@buzzopia.com","city":"Lumberton","state":"MD"} +{"index":{"_id":"258"}} +{"account_number":258,"balance":5712,"firstname":"Lindsey","lastname":"Hawkins","age":37,"gender":"M","address":"706 Frost Street","employer":"Enormo","email":"lindseyhawkins@enormo.com","city":"Gardners","state":"AK"} +{"index":{"_id":"260"}} +{"account_number":260,"balance":2726,"firstname":"Kari","lastname":"Skinner","age":30,"gender":"F","address":"735 Losee Terrace","employer":"Singavera","email":"kariskinner@singavera.com","city":"Rushford","state":"WV"} +{"index":{"_id":"265"}} +{"account_number":265,"balance":46910,"firstname":"Marion","lastname":"Schneider","age":26,"gender":"F","address":"574 Everett Avenue","employer":"Evidends","email":"marionschneider@evidends.com","city":"Maplewood","state":"WY"} +{"index":{"_id":"272"}} +{"account_number":272,"balance":19253,"firstname":"Lilly","lastname":"Morgan","age":25,"gender":"F","address":"689 Fleet Street","employer":"Biolive","email":"lillymorgan@biolive.com","city":"Sunbury","state":"OH"} +{"index":{"_id":"277"}} +{"account_number":277,"balance":29564,"firstname":"Romero","lastname":"Lott","age":31,"gender":"M","address":"456 Danforth Street","employer":"Plasto","email":"romerolott@plasto.com","city":"Vincent","state":"VT"} +{"index":{"_id":"284"}} +{"account_number":284,"balance":22806,"firstname":"Randolph","lastname":"Banks","age":29,"gender":"M","address":"875 Hamilton Avenue","employer":"Caxt","email":"randolphbanks@caxt.com","city":"Crawfordsville","state":"WA"} +{"index":{"_id":"289"}} +{"account_number":289,"balance":7798,"firstname":"Blair","lastname":"Church","age":29,"gender":"M","address":"370 Sutton Street","employer":"Cubix","email":"blairchurch@cubix.com","city":"Nile","state":"NH"} +{"index":{"_id":"291"}} +{"account_number":291,"balance":19955,"firstname":"Lynn","lastname":"Pollard","age":40,"gender":"F","address":"685 Pierrepont Street","employer":"Slambda","email":"lynnpollard@slambda.com","city":"Mappsville","state":"ID"} +{"index":{"_id":"296"}} +{"account_number":296,"balance":24606,"firstname":"Rosa","lastname":"Oliver","age":34,"gender":"M","address":"168 Woodbine Street","employer":"Idetica","email":"rosaoliver@idetica.com","city":"Robinson","state":"WY"} +{"index":{"_id":"304"}} +{"account_number":304,"balance":28647,"firstname":"Palmer","lastname":"Clark","age":35,"gender":"M","address":"866 Boulevard Court","employer":"Maximind","email":"palmerclark@maximind.com","city":"Avalon","state":"NH"} +{"index":{"_id":"309"}} +{"account_number":309,"balance":3830,"firstname":"Rosemarie","lastname":"Nieves","age":30,"gender":"M","address":"206 Alice Court","employer":"Zounds","email":"rosemarienieves@zounds.com","city":"Ferney","state":"AR"} +{"index":{"_id":"311"}} +{"account_number":311,"balance":13388,"firstname":"Vinson","lastname":"Ballard","age":23,"gender":"F","address":"960 Glendale Court","employer":"Gynk","email":"vinsonballard@gynk.com","city":"Fairforest","state":"WY"} +{"index":{"_id":"316"}} +{"account_number":316,"balance":8214,"firstname":"Anita","lastname":"Ewing","age":32,"gender":"M","address":"396 Lombardy Street","employer":"Panzent","email":"anitaewing@panzent.com","city":"Neahkahnie","state":"WY"} +{"index":{"_id":"323"}} +{"account_number":323,"balance":42230,"firstname":"Chelsea","lastname":"Gamble","age":34,"gender":"F","address":"356 Dare 
Court","employer":"Isosphere","email":"chelseagamble@isosphere.com","city":"Dundee","state":"MD"} +{"index":{"_id":"328"}} +{"account_number":328,"balance":12523,"firstname":"Good","lastname":"Campbell","age":27,"gender":"F","address":"438 Hicks Street","employer":"Gracker","email":"goodcampbell@gracker.com","city":"Marion","state":"CA"} +{"index":{"_id":"330"}} +{"account_number":330,"balance":41620,"firstname":"Yvette","lastname":"Browning","age":34,"gender":"F","address":"431 Beekman Place","employer":"Marketoid","email":"yvettebrowning@marketoid.com","city":"Talpa","state":"CO"} +{"index":{"_id":"335"}} +{"account_number":335,"balance":35433,"firstname":"Vera","lastname":"Hansen","age":24,"gender":"M","address":"252 Bushwick Avenue","employer":"Zanilla","email":"verahansen@zanilla.com","city":"Manila","state":"TN"} +{"index":{"_id":"342"}} +{"account_number":342,"balance":33670,"firstname":"Vivian","lastname":"Wells","age":36,"gender":"M","address":"570 Cobek Court","employer":"Nutralab","email":"vivianwells@nutralab.com","city":"Fontanelle","state":"OK"} +{"index":{"_id":"347"}} +{"account_number":347,"balance":36038,"firstname":"Gould","lastname":"Carson","age":24,"gender":"F","address":"784 Pulaski Street","employer":"Mobildata","email":"gouldcarson@mobildata.com","city":"Goochland","state":"MI"} +{"index":{"_id":"354"}} +{"account_number":354,"balance":21294,"firstname":"Kidd","lastname":"Mclean","age":22,"gender":"M","address":"691 Saratoga Avenue","employer":"Ronbert","email":"kiddmclean@ronbert.com","city":"Tioga","state":"ME"} +{"index":{"_id":"359"}} +{"account_number":359,"balance":29927,"firstname":"Vanessa","lastname":"Harvey","age":28,"gender":"F","address":"679 Rutledge Street","employer":"Zentime","email":"vanessaharvey@zentime.com","city":"Williston","state":"IL"} +{"index":{"_id":"361"}} +{"account_number":361,"balance":23659,"firstname":"Noreen","lastname":"Shelton","age":36,"gender":"M","address":"702 Tillary Street","employer":"Medmex","email":"noreenshelton@medmex.com","city":"Derwood","state":"NH"} +{"index":{"_id":"366"}} +{"account_number":366,"balance":42368,"firstname":"Lydia","lastname":"Cooke","age":31,"gender":"M","address":"470 Coleman Street","employer":"Comstar","email":"lydiacooke@comstar.com","city":"Datil","state":"TN"} +{"index":{"_id":"373"}} +{"account_number":373,"balance":9671,"firstname":"Simpson","lastname":"Carpenter","age":21,"gender":"M","address":"837 Horace Court","employer":"Snips","email":"simpsoncarpenter@snips.com","city":"Tolu","state":"MA"} +{"index":{"_id":"378"}} +{"account_number":378,"balance":27100,"firstname":"Watson","lastname":"Simpson","age":36,"gender":"F","address":"644 Thomas Street","employer":"Wrapture","email":"watsonsimpson@wrapture.com","city":"Keller","state":"TX"} +{"index":{"_id":"380"}} +{"account_number":380,"balance":35628,"firstname":"Fernandez","lastname":"Reid","age":33,"gender":"F","address":"154 Melba Court","employer":"Cosmosis","email":"fernandezreid@cosmosis.com","city":"Boyd","state":"NE"} +{"index":{"_id":"385"}} +{"account_number":385,"balance":11022,"firstname":"Rosalinda","lastname":"Valencia","age":22,"gender":"M","address":"933 Lloyd Street","employer":"Zoarere","email":"rosalindavalencia@zoarere.com","city":"Waverly","state":"GA"} +{"index":{"_id":"392"}} +{"account_number":392,"balance":31613,"firstname":"Dotson","lastname":"Dean","age":35,"gender":"M","address":"136 Ford Street","employer":"Petigems","email":"dotsondean@petigems.com","city":"Chical","state":"SD"} +{"index":{"_id":"397"}} 
+{"account_number":397,"balance":37418,"firstname":"Leonard","lastname":"Gray","age":36,"gender":"F","address":"840 Morgan Avenue","employer":"Recritube","email":"leonardgray@recritube.com","city":"Edenburg","state":"AL"} +{"index":{"_id":"400"}} +{"account_number":400,"balance":20685,"firstname":"Kane","lastname":"King","age":21,"gender":"F","address":"405 Cornelia Street","employer":"Tri@Tribalog","email":"kaneking@tri@tribalog.com","city":"Gulf","state":"VT"} +{"index":{"_id":"405"}} +{"account_number":405,"balance":5679,"firstname":"Strickland","lastname":"Fuller","age":26,"gender":"M","address":"990 Concord Street","employer":"Digique","email":"stricklandfuller@digique.com","city":"Southmont","state":"NV"} +{"index":{"_id":"412"}} +{"account_number":412,"balance":27436,"firstname":"Ilene","lastname":"Abbott","age":26,"gender":"M","address":"846 Vine Street","employer":"Typhonica","email":"ileneabbott@typhonica.com","city":"Cedarville","state":"VT"} +{"index":{"_id":"417"}} +{"account_number":417,"balance":1788,"firstname":"Wheeler","lastname":"Ayers","age":35,"gender":"F","address":"677 Hope Street","employer":"Fortean","email":"wheelerayers@fortean.com","city":"Ironton","state":"PA"} +{"index":{"_id":"424"}} +{"account_number":424,"balance":36818,"firstname":"Tracie","lastname":"Gregory","age":34,"gender":"M","address":"112 Hunterfly Place","employer":"Comstruct","email":"traciegregory@comstruct.com","city":"Onton","state":"TN"} +{"index":{"_id":"429"}} +{"account_number":429,"balance":46970,"firstname":"Cantu","lastname":"Lindsey","age":31,"gender":"M","address":"404 Willoughby Avenue","employer":"Inquala","email":"cantulindsey@inquala.com","city":"Cowiche","state":"IA"} +{"index":{"_id":"431"}} +{"account_number":431,"balance":13136,"firstname":"Laurie","lastname":"Shaw","age":26,"gender":"F","address":"263 Aviation Road","employer":"Zillanet","email":"laurieshaw@zillanet.com","city":"Harmon","state":"WV"} +{"index":{"_id":"436"}} +{"account_number":436,"balance":27585,"firstname":"Alexander","lastname":"Sargent","age":23,"gender":"M","address":"363 Albemarle Road","employer":"Fangold","email":"alexandersargent@fangold.com","city":"Calpine","state":"OR"} +{"index":{"_id":"443"}} +{"account_number":443,"balance":7588,"firstname":"Huff","lastname":"Thomas","age":23,"gender":"M","address":"538 Erskine Loop","employer":"Accufarm","email":"huffthomas@accufarm.com","city":"Corinne","state":"AL"} +{"index":{"_id":"448"}} +{"account_number":448,"balance":22776,"firstname":"Adriana","lastname":"Mcfadden","age":35,"gender":"F","address":"984 Woodside Avenue","employer":"Telequiet","email":"adrianamcfadden@telequiet.com","city":"Darrtown","state":"WI"} +{"index":{"_id":"450"}} +{"account_number":450,"balance":2643,"firstname":"Bradford","lastname":"Nielsen","age":25,"gender":"M","address":"487 Keen Court","employer":"Exovent","email":"bradfordnielsen@exovent.com","city":"Hamilton","state":"DE"} +{"index":{"_id":"455"}} +{"account_number":455,"balance":39556,"firstname":"Lynn","lastname":"Tran","age":36,"gender":"M","address":"741 Richmond Street","employer":"Optyk","email":"lynntran@optyk.com","city":"Clinton","state":"WV"} +{"index":{"_id":"462"}} +{"account_number":462,"balance":10871,"firstname":"Calderon","lastname":"Day","age":27,"gender":"M","address":"810 Milford Street","employer":"Cofine","email":"calderonday@cofine.com","city":"Kula","state":"OK"} +{"index":{"_id":"467"}} 
+{"account_number":467,"balance":6312,"firstname":"Angelica","lastname":"May","age":32,"gender":"F","address":"384 Karweg Place","employer":"Keeg","email":"angelicamay@keeg.com","city":"Tetherow","state":"IA"} +{"index":{"_id":"474"}} +{"account_number":474,"balance":35896,"firstname":"Obrien","lastname":"Walton","age":40,"gender":"F","address":"192 Ide Court","employer":"Suremax","email":"obrienwalton@suremax.com","city":"Crucible","state":"UT"} +{"index":{"_id":"479"}} +{"account_number":479,"balance":31865,"firstname":"Cameron","lastname":"Ross","age":40,"gender":"M","address":"904 Bouck Court","employer":"Telpod","email":"cameronross@telpod.com","city":"Nord","state":"MO"} +{"index":{"_id":"481"}} +{"account_number":481,"balance":20024,"firstname":"Lina","lastname":"Stanley","age":33,"gender":"M","address":"361 Hanover Place","employer":"Strozen","email":"linastanley@strozen.com","city":"Wyoming","state":"NC"} +{"index":{"_id":"486"}} +{"account_number":486,"balance":35902,"firstname":"Dixie","lastname":"Fuentes","age":22,"gender":"F","address":"991 Applegate Court","employer":"Portico","email":"dixiefuentes@portico.com","city":"Salix","state":"VA"} +{"index":{"_id":"493"}} +{"account_number":493,"balance":5871,"firstname":"Campbell","lastname":"Best","age":24,"gender":"M","address":"297 Friel Place","employer":"Fanfare","email":"campbellbest@fanfare.com","city":"Kidder","state":"GA"} +{"index":{"_id":"498"}} +{"account_number":498,"balance":10516,"firstname":"Stella","lastname":"Hinton","age":39,"gender":"F","address":"649 Columbia Place","employer":"Flyboyz","email":"stellahinton@flyboyz.com","city":"Crenshaw","state":"SC"} +{"index":{"_id":"501"}} +{"account_number":501,"balance":16572,"firstname":"Kelley","lastname":"Ochoa","age":36,"gender":"M","address":"451 Clifton Place","employer":"Bluplanet","email":"kelleyochoa@bluplanet.com","city":"Gouglersville","state":"CT"} +{"index":{"_id":"506"}} +{"account_number":506,"balance":43440,"firstname":"Davidson","lastname":"Salas","age":28,"gender":"M","address":"731 Cleveland Street","employer":"Sequitur","email":"davidsonsalas@sequitur.com","city":"Lloyd","state":"ME"} +{"index":{"_id":"513"}} +{"account_number":513,"balance":30040,"firstname":"Maryellen","lastname":"Rose","age":37,"gender":"F","address":"428 Durland Place","employer":"Waterbaby","email":"maryellenrose@waterbaby.com","city":"Kiskimere","state":"RI"} +{"index":{"_id":"518"}} +{"account_number":518,"balance":48954,"firstname":"Finch","lastname":"Curtis","age":29,"gender":"F","address":"137 Ryder Street","employer":"Viagrand","email":"finchcurtis@viagrand.com","city":"Riverton","state":"MO"} +{"index":{"_id":"520"}} +{"account_number":520,"balance":27987,"firstname":"Brandy","lastname":"Calhoun","age":32,"gender":"M","address":"818 Harden Street","employer":"Maxemia","email":"brandycalhoun@maxemia.com","city":"Sidman","state":"OR"} +{"index":{"_id":"525"}} +{"account_number":525,"balance":23545,"firstname":"Holly","lastname":"Miles","age":25,"gender":"M","address":"746 Ludlam Place","employer":"Xurban","email":"hollymiles@xurban.com","city":"Harold","state":"AR"} +{"index":{"_id":"532"}} +{"account_number":532,"balance":17207,"firstname":"Hardin","lastname":"Kirk","age":26,"gender":"M","address":"268 Canarsie Road","employer":"Exposa","email":"hardinkirk@exposa.com","city":"Stouchsburg","state":"IL"} +{"index":{"_id":"537"}} +{"account_number":537,"balance":31069,"firstname":"Morin","lastname":"Frost","age":29,"gender":"M","address":"910 Lake 
Street","employer":"Primordia","email":"morinfrost@primordia.com","city":"Rivera","state":"DE"} +{"index":{"_id":"544"}} +{"account_number":544,"balance":41735,"firstname":"Short","lastname":"Dennis","age":21,"gender":"F","address":"908 Glen Street","employer":"Minga","email":"shortdennis@minga.com","city":"Dale","state":"KY"} +{"index":{"_id":"549"}} +{"account_number":549,"balance":1932,"firstname":"Jacqueline","lastname":"Maxwell","age":40,"gender":"M","address":"444 Schenck Place","employer":"Fuelworks","email":"jacquelinemaxwell@fuelworks.com","city":"Oretta","state":"OR"} +{"index":{"_id":"551"}} +{"account_number":551,"balance":21732,"firstname":"Milagros","lastname":"Travis","age":27,"gender":"F","address":"380 Murdock Court","employer":"Sloganaut","email":"milagrostravis@sloganaut.com","city":"Homeland","state":"AR"} +{"index":{"_id":"556"}} +{"account_number":556,"balance":36420,"firstname":"Collier","lastname":"Odonnell","age":35,"gender":"M","address":"591 Nolans Lane","employer":"Sultraxin","email":"collierodonnell@sultraxin.com","city":"Fulford","state":"MD"} +{"index":{"_id":"563"}} +{"account_number":563,"balance":43403,"firstname":"Morgan","lastname":"Torres","age":30,"gender":"F","address":"672 Belvidere Street","employer":"Quonata","email":"morgantorres@quonata.com","city":"Hollymead","state":"KY"} +{"index":{"_id":"568"}} +{"account_number":568,"balance":36628,"firstname":"Lesa","lastname":"Maynard","age":29,"gender":"F","address":"295 Whitty Lane","employer":"Coash","email":"lesamaynard@coash.com","city":"Broadlands","state":"VT"} +{"index":{"_id":"570"}} +{"account_number":570,"balance":26751,"firstname":"Church","lastname":"Mercado","age":24,"gender":"F","address":"892 Wyckoff Street","employer":"Xymonk","email":"churchmercado@xymonk.com","city":"Gloucester","state":"KY"} +{"index":{"_id":"575"}} +{"account_number":575,"balance":12588,"firstname":"Buchanan","lastname":"Pope","age":39,"gender":"M","address":"581 Sumner Place","employer":"Stucco","email":"buchananpope@stucco.com","city":"Ellerslie","state":"MD"} +{"index":{"_id":"582"}} +{"account_number":582,"balance":33371,"firstname":"Manning","lastname":"Guthrie","age":24,"gender":"F","address":"271 Jodie Court","employer":"Xerex","email":"manningguthrie@xerex.com","city":"Breinigsville","state":"NM"} +{"index":{"_id":"587"}} +{"account_number":587,"balance":3468,"firstname":"Carly","lastname":"Johns","age":33,"gender":"M","address":"390 Noll Street","employer":"Gallaxia","email":"carlyjohns@gallaxia.com","city":"Emison","state":"DC"} +{"index":{"_id":"594"}} +{"account_number":594,"balance":28194,"firstname":"Golden","lastname":"Donovan","age":26,"gender":"M","address":"199 Jewel Street","employer":"Organica","email":"goldendonovan@organica.com","city":"Macdona","state":"RI"} +{"index":{"_id":"599"}} +{"account_number":599,"balance":11944,"firstname":"Joanna","lastname":"Jennings","age":36,"gender":"F","address":"318 Irving Street","employer":"Extremo","email":"joannajennings@extremo.com","city":"Bartley","state":"MI"} +{"index":{"_id":"602"}} +{"account_number":602,"balance":38699,"firstname":"Mcgowan","lastname":"Mcclain","age":33,"gender":"M","address":"361 Stoddard Place","employer":"Oatfarm","email":"mcgowanmcclain@oatfarm.com","city":"Kapowsin","state":"MI"} +{"index":{"_id":"607"}} +{"account_number":607,"balance":38350,"firstname":"White","lastname":"Small","age":38,"gender":"F","address":"736 Judge Street","employer":"Immunics","email":"whitesmall@immunics.com","city":"Fairfield","state":"HI"} 
+{"index":{"_id":"614"}} +{"account_number":614,"balance":13157,"firstname":"Salazar","lastname":"Howard","age":35,"gender":"F","address":"847 Imlay Street","employer":"Retrack","email":"salazarhoward@retrack.com","city":"Grill","state":"FL"} +{"index":{"_id":"619"}} +{"account_number":619,"balance":48755,"firstname":"Grimes","lastname":"Reynolds","age":36,"gender":"M","address":"378 Denton Place","employer":"Frenex","email":"grimesreynolds@frenex.com","city":"Murillo","state":"LA"} +{"index":{"_id":"621"}} +{"account_number":621,"balance":35480,"firstname":"Leslie","lastname":"Sloan","age":26,"gender":"F","address":"336 Kansas Place","employer":"Dancity","email":"lesliesloan@dancity.com","city":"Corriganville","state":"AR"} +{"index":{"_id":"626"}} +{"account_number":626,"balance":19498,"firstname":"Ava","lastname":"Richardson","age":31,"gender":"F","address":"666 Nautilus Avenue","employer":"Cinaster","email":"avarichardson@cinaster.com","city":"Sutton","state":"AL"} +{"index":{"_id":"633"}} +{"account_number":633,"balance":35874,"firstname":"Conner","lastname":"Ramos","age":34,"gender":"M","address":"575 Agate Court","employer":"Insource","email":"connerramos@insource.com","city":"Madaket","state":"OK"} +{"index":{"_id":"638"}} +{"account_number":638,"balance":2658,"firstname":"Bridget","lastname":"Gallegos","age":31,"gender":"M","address":"383 Wogan Terrace","employer":"Songlines","email":"bridgetgallegos@songlines.com","city":"Linganore","state":"WA"} +{"index":{"_id":"640"}} +{"account_number":640,"balance":35596,"firstname":"Candace","lastname":"Hancock","age":25,"gender":"M","address":"574 Riverdale Avenue","employer":"Animalia","email":"candacehancock@animalia.com","city":"Blandburg","state":"KY"} +{"index":{"_id":"645"}} +{"account_number":645,"balance":29362,"firstname":"Edwina","lastname":"Hutchinson","age":26,"gender":"F","address":"892 Pacific Street","employer":"Essensia","email":"edwinahutchinson@essensia.com","city":"Dowling","state":"NE"} +{"index":{"_id":"652"}} +{"account_number":652,"balance":17363,"firstname":"Bonner","lastname":"Garner","age":26,"gender":"M","address":"219 Grafton Street","employer":"Utarian","email":"bonnergarner@utarian.com","city":"Vandiver","state":"PA"} +{"index":{"_id":"657"}} +{"account_number":657,"balance":40475,"firstname":"Kathleen","lastname":"Wilder","age":34,"gender":"F","address":"286 Sutter Avenue","employer":"Solgan","email":"kathleenwilder@solgan.com","city":"Graniteville","state":"MI"} +{"index":{"_id":"664"}} +{"account_number":664,"balance":16163,"firstname":"Hart","lastname":"Mccormick","age":40,"gender":"M","address":"144 Guider Avenue","employer":"Dyno","email":"hartmccormick@dyno.com","city":"Carbonville","state":"ID"} +{"index":{"_id":"669"}} +{"account_number":669,"balance":16934,"firstname":"Jewel","lastname":"Estrada","age":28,"gender":"M","address":"896 Meeker Avenue","employer":"Zilla","email":"jewelestrada@zilla.com","city":"Goodville","state":"PA"} +{"index":{"_id":"671"}} +{"account_number":671,"balance":29029,"firstname":"Antoinette","lastname":"Cook","age":34,"gender":"M","address":"375 Cumberland Street","employer":"Harmoney","email":"antoinettecook@harmoney.com","city":"Bergoo","state":"VT"} +{"index":{"_id":"676"}} +{"account_number":676,"balance":23842,"firstname":"Lisa","lastname":"Dudley","age":34,"gender":"M","address":"506 Vanderveer Street","employer":"Tropoli","email":"lisadudley@tropoli.com","city":"Konterra","state":"NY"} +{"index":{"_id":"683"}} 
+{"account_number":683,"balance":4381,"firstname":"Matilda","lastname":"Berger","age":39,"gender":"M","address":"884 Noble Street","employer":"Fibrodyne","email":"matildaberger@fibrodyne.com","city":"Shepardsville","state":"TN"} +{"index":{"_id":"688"}} +{"account_number":688,"balance":17931,"firstname":"Freeman","lastname":"Zamora","age":22,"gender":"F","address":"114 Herzl Street","employer":"Elemantra","email":"freemanzamora@elemantra.com","city":"Libertytown","state":"NM"} +{"index":{"_id":"690"}} +{"account_number":690,"balance":18127,"firstname":"Russo","lastname":"Swanson","age":35,"gender":"F","address":"256 Roebling Street","employer":"Zaj","email":"russoswanson@zaj.com","city":"Hoagland","state":"MI"} +{"index":{"_id":"695"}} +{"account_number":695,"balance":36800,"firstname":"Gonzales","lastname":"Mcfarland","age":26,"gender":"F","address":"647 Louisa Street","employer":"Songbird","email":"gonzalesmcfarland@songbird.com","city":"Crisman","state":"ID"} +{"index":{"_id":"703"}} +{"account_number":703,"balance":27443,"firstname":"Dona","lastname":"Burton","age":29,"gender":"M","address":"489 Flatlands Avenue","employer":"Cytrex","email":"donaburton@cytrex.com","city":"Reno","state":"VA"} +{"index":{"_id":"708"}} +{"account_number":708,"balance":34002,"firstname":"May","lastname":"Ortiz","age":28,"gender":"F","address":"244 Chauncey Street","employer":"Syntac","email":"mayortiz@syntac.com","city":"Munjor","state":"ID"} +{"index":{"_id":"710"}} +{"account_number":710,"balance":33650,"firstname":"Shelton","lastname":"Stark","age":37,"gender":"M","address":"404 Ovington Avenue","employer":"Kraggle","email":"sheltonstark@kraggle.com","city":"Ogema","state":"TN"} +{"index":{"_id":"715"}} +{"account_number":715,"balance":23734,"firstname":"Tammi","lastname":"Hodge","age":24,"gender":"M","address":"865 Church Lane","employer":"Netur","email":"tammihodge@netur.com","city":"Lacomb","state":"KS"} +{"index":{"_id":"722"}} +{"account_number":722,"balance":27256,"firstname":"Roberts","lastname":"Beasley","age":34,"gender":"F","address":"305 Kings Hwy","employer":"Quintity","email":"robertsbeasley@quintity.com","city":"Hayden","state":"PA"} +{"index":{"_id":"727"}} +{"account_number":727,"balance":27263,"firstname":"Natasha","lastname":"Knapp","age":36,"gender":"M","address":"723 Hubbard Street","employer":"Exostream","email":"natashaknapp@exostream.com","city":"Trexlertown","state":"LA"} +{"index":{"_id":"734"}} +{"account_number":734,"balance":20325,"firstname":"Keri","lastname":"Kinney","age":23,"gender":"M","address":"490 Balfour Place","employer":"Retrotex","email":"kerikinney@retrotex.com","city":"Salunga","state":"PA"} +{"index":{"_id":"739"}} +{"account_number":739,"balance":39063,"firstname":"Gwen","lastname":"Hardy","age":33,"gender":"F","address":"733 Stuart Street","employer":"Exozent","email":"gwenhardy@exozent.com","city":"Drytown","state":"NY"} +{"index":{"_id":"741"}} +{"account_number":741,"balance":33074,"firstname":"Nielsen","lastname":"Good","age":22,"gender":"M","address":"404 Norfolk Street","employer":"Kiggle","email":"nielsengood@kiggle.com","city":"Cumberland","state":"WA"} +{"index":{"_id":"746"}} +{"account_number":746,"balance":15970,"firstname":"Marguerite","lastname":"Wall","age":28,"gender":"F","address":"364 Crosby Avenue","employer":"Aquoavo","email":"margueritewall@aquoavo.com","city":"Jeff","state":"MI"} +{"index":{"_id":"753"}} +{"account_number":753,"balance":33340,"firstname":"Katina","lastname":"Alford","age":21,"gender":"F","address":"690 Ross 
Street","employer":"Intrawear","email":"katinaalford@intrawear.com","city":"Grimsley","state":"OK"} +{"index":{"_id":"758"}} +{"account_number":758,"balance":15739,"firstname":"Berta","lastname":"Short","age":28,"gender":"M","address":"149 Surf Avenue","employer":"Ozean","email":"bertashort@ozean.com","city":"Odessa","state":"UT"} +{"index":{"_id":"760"}} +{"account_number":760,"balance":40996,"firstname":"Rhea","lastname":"Blair","age":37,"gender":"F","address":"440 Hubbard Place","employer":"Bicol","email":"rheablair@bicol.com","city":"Stockwell","state":"LA"} +{"index":{"_id":"765"}} +{"account_number":765,"balance":31278,"firstname":"Knowles","lastname":"Cunningham","age":23,"gender":"M","address":"753 Macdougal Street","employer":"Thredz","email":"knowlescunningham@thredz.com","city":"Thomasville","state":"WA"} +{"index":{"_id":"772"}} +{"account_number":772,"balance":37849,"firstname":"Eloise","lastname":"Sparks","age":21,"gender":"M","address":"608 Willow Street","employer":"Satiance","email":"eloisesparks@satiance.com","city":"Richford","state":"NY"} +{"index":{"_id":"777"}} +{"account_number":777,"balance":48294,"firstname":"Adkins","lastname":"Mejia","age":32,"gender":"M","address":"186 Oxford Walk","employer":"Datagen","email":"adkinsmejia@datagen.com","city":"Faywood","state":"OK"} +{"index":{"_id":"784"}} +{"account_number":784,"balance":25291,"firstname":"Mabel","lastname":"Thornton","age":21,"gender":"M","address":"124 Louisiana Avenue","employer":"Zolavo","email":"mabelthornton@zolavo.com","city":"Lynn","state":"AL"} +{"index":{"_id":"789"}} +{"account_number":789,"balance":8760,"firstname":"Cunningham","lastname":"Kerr","age":27,"gender":"F","address":"154 Sharon Street","employer":"Polarium","email":"cunninghamkerr@polarium.com","city":"Tuskahoma","state":"MS"} +{"index":{"_id":"791"}} +{"account_number":791,"balance":48249,"firstname":"Janine","lastname":"Huber","age":38,"gender":"F","address":"348 Porter Avenue","employer":"Viocular","email":"janinehuber@viocular.com","city":"Fivepointville","state":"MA"} +{"index":{"_id":"796"}} +{"account_number":796,"balance":23503,"firstname":"Mona","lastname":"Craft","age":35,"gender":"F","address":"511 Henry Street","employer":"Opticom","email":"monacraft@opticom.com","city":"Websterville","state":"IN"} +{"index":{"_id":"804"}} +{"account_number":804,"balance":23610,"firstname":"Rojas","lastname":"Oneal","age":27,"gender":"M","address":"669 Sandford Street","employer":"Glukgluk","email":"rojasoneal@glukgluk.com","city":"Wheaton","state":"ME"} +{"index":{"_id":"809"}} +{"account_number":809,"balance":47812,"firstname":"Christie","lastname":"Strickland","age":30,"gender":"M","address":"346 Bancroft Place","employer":"Anarco","email":"christiestrickland@anarco.com","city":"Baden","state":"NV"} +{"index":{"_id":"811"}} +{"account_number":811,"balance":26007,"firstname":"Walls","lastname":"Rogers","age":28,"gender":"F","address":"352 Freeman Street","employer":"Geekmosis","email":"wallsrogers@geekmosis.com","city":"Caroleen","state":"NV"} +{"index":{"_id":"816"}} +{"account_number":816,"balance":9567,"firstname":"Cornelia","lastname":"Lane","age":20,"gender":"F","address":"384 Bainbridge Street","employer":"Sulfax","email":"cornelialane@sulfax.com","city":"Elizaville","state":"MS"} +{"index":{"_id":"823"}} +{"account_number":823,"balance":48726,"firstname":"Celia","lastname":"Bernard","age":33,"gender":"F","address":"466 Amboy Street","employer":"Mitroc","email":"celiabernard@mitroc.com","city":"Skyland","state":"GA"} 
+{"index":{"_id":"828"}} +{"account_number":828,"balance":44890,"firstname":"Blanche","lastname":"Holmes","age":33,"gender":"F","address":"605 Stryker Court","employer":"Motovate","email":"blancheholmes@motovate.com","city":"Loomis","state":"KS"} +{"index":{"_id":"830"}} +{"account_number":830,"balance":45210,"firstname":"Louella","lastname":"Chan","age":23,"gender":"M","address":"511 Heath Place","employer":"Conferia","email":"louellachan@conferia.com","city":"Brookfield","state":"OK"} +{"index":{"_id":"835"}} +{"account_number":835,"balance":46558,"firstname":"Glover","lastname":"Rutledge","age":25,"gender":"F","address":"641 Royce Street","employer":"Ginkogene","email":"gloverrutledge@ginkogene.com","city":"Dixonville","state":"VA"} +{"index":{"_id":"842"}} +{"account_number":842,"balance":49587,"firstname":"Meagan","lastname":"Buckner","age":23,"gender":"F","address":"833 Bushwick Court","employer":"Biospan","email":"meaganbuckner@biospan.com","city":"Craig","state":"TX"} +{"index":{"_id":"847"}} +{"account_number":847,"balance":8652,"firstname":"Antonia","lastname":"Duncan","age":23,"gender":"M","address":"644 Stryker Street","employer":"Talae","email":"antoniaduncan@talae.com","city":"Dawn","state":"MO"} +{"index":{"_id":"854"}} +{"account_number":854,"balance":49795,"firstname":"Jimenez","lastname":"Barry","age":25,"gender":"F","address":"603 Cooper Street","employer":"Verton","email":"jimenezbarry@verton.com","city":"Moscow","state":"AL"} +{"index":{"_id":"859"}} +{"account_number":859,"balance":20734,"firstname":"Beulah","lastname":"Stuart","age":24,"gender":"F","address":"651 Albemarle Terrace","employer":"Hatology","email":"beulahstuart@hatology.com","city":"Waiohinu","state":"RI"} +{"index":{"_id":"861"}} +{"account_number":861,"balance":44173,"firstname":"Jaime","lastname":"Wilson","age":35,"gender":"M","address":"680 Richardson Street","employer":"Temorak","email":"jaimewilson@temorak.com","city":"Fidelis","state":"FL"} +{"index":{"_id":"866"}} +{"account_number":866,"balance":45565,"firstname":"Araceli","lastname":"Woodward","age":28,"gender":"M","address":"326 Meadow Street","employer":"Olympix","email":"araceliwoodward@olympix.com","city":"Dana","state":"KS"} +{"index":{"_id":"873"}} +{"account_number":873,"balance":43931,"firstname":"Tisha","lastname":"Cotton","age":39,"gender":"F","address":"432 Lincoln Road","employer":"Buzzmaker","email":"tishacotton@buzzmaker.com","city":"Bluetown","state":"GA"} +{"index":{"_id":"878"}} +{"account_number":878,"balance":49159,"firstname":"Battle","lastname":"Blackburn","age":40,"gender":"F","address":"234 Hendrix Street","employer":"Zilphur","email":"battleblackburn@zilphur.com","city":"Wanamie","state":"PA"} +{"index":{"_id":"880"}} +{"account_number":880,"balance":22575,"firstname":"Christian","lastname":"Myers","age":35,"gender":"M","address":"737 Crown Street","employer":"Combogen","email":"christianmyers@combogen.com","city":"Abrams","state":"OK"} +{"index":{"_id":"885"}} +{"account_number":885,"balance":31661,"firstname":"Valdez","lastname":"Roberson","age":40,"gender":"F","address":"227 Scholes Street","employer":"Delphide","email":"valdezroberson@delphide.com","city":"Chilton","state":"MT"} +{"index":{"_id":"892"}} +{"account_number":892,"balance":44974,"firstname":"Hill","lastname":"Hayes","age":29,"gender":"M","address":"721 Dooley Street","employer":"Fuelton","email":"hillhayes@fuelton.com","city":"Orason","state":"MT"} +{"index":{"_id":"897"}} 
+{"account_number":897,"balance":45973,"firstname":"Alyson","lastname":"Irwin","age":25,"gender":"M","address":"731 Poplar Street","employer":"Quizka","email":"alysonirwin@quizka.com","city":"Singer","state":"VA"} +{"index":{"_id":"900"}} +{"account_number":900,"balance":6124,"firstname":"Gonzalez","lastname":"Watson","age":23,"gender":"M","address":"624 Sullivan Street","employer":"Marvane","email":"gonzalezwatson@marvane.com","city":"Wikieup","state":"IL"} +{"index":{"_id":"905"}} +{"account_number":905,"balance":29438,"firstname":"Schultz","lastname":"Moreno","age":20,"gender":"F","address":"761 Cedar Street","employer":"Paragonia","email":"schultzmoreno@paragonia.com","city":"Glenshaw","state":"SC"} +{"index":{"_id":"912"}} +{"account_number":912,"balance":13675,"firstname":"Flora","lastname":"Alvarado","age":26,"gender":"M","address":"771 Vandervoort Avenue","employer":"Boilicon","email":"floraalvarado@boilicon.com","city":"Vivian","state":"ID"} +{"index":{"_id":"917"}} +{"account_number":917,"balance":47782,"firstname":"Parks","lastname":"Hurst","age":24,"gender":"M","address":"933 Cozine Avenue","employer":"Pyramis","email":"parkshurst@pyramis.com","city":"Lindcove","state":"GA"} +{"index":{"_id":"924"}} +{"account_number":924,"balance":3811,"firstname":"Hilary","lastname":"Leonard","age":24,"gender":"M","address":"235 Hegeman Avenue","employer":"Metroz","email":"hilaryleonard@metroz.com","city":"Roosevelt","state":"ME"} +{"index":{"_id":"929"}} +{"account_number":929,"balance":34708,"firstname":"Willie","lastname":"Hickman","age":35,"gender":"M","address":"430 Devoe Street","employer":"Apextri","email":"williehickman@apextri.com","city":"Clay","state":"MS"} +{"index":{"_id":"931"}} +{"account_number":931,"balance":8244,"firstname":"Ingrid","lastname":"Garcia","age":23,"gender":"F","address":"674 Indiana Place","employer":"Balooba","email":"ingridgarcia@balooba.com","city":"Interlochen","state":"AZ"} +{"index":{"_id":"936"}} +{"account_number":936,"balance":22430,"firstname":"Beth","lastname":"Frye","age":36,"gender":"M","address":"462 Thatford Avenue","employer":"Puria","email":"bethfrye@puria.com","city":"Hiseville","state":"LA"} +{"index":{"_id":"943"}} +{"account_number":943,"balance":24187,"firstname":"Wagner","lastname":"Griffin","age":23,"gender":"M","address":"489 Ellery Street","employer":"Gazak","email":"wagnergriffin@gazak.com","city":"Lorraine","state":"HI"} +{"index":{"_id":"948"}} +{"account_number":948,"balance":37074,"firstname":"Sargent","lastname":"Powers","age":40,"gender":"M","address":"532 Fiske Place","employer":"Accuprint","email":"sargentpowers@accuprint.com","city":"Umapine","state":"AK"} +{"index":{"_id":"950"}} +{"account_number":950,"balance":30916,"firstname":"Sherrie","lastname":"Patel","age":32,"gender":"F","address":"658 Langham Street","employer":"Futurize","email":"sherriepatel@futurize.com","city":"Garfield","state":"OR"} +{"index":{"_id":"955"}} +{"account_number":955,"balance":41621,"firstname":"Klein","lastname":"Kemp","age":33,"gender":"M","address":"370 Vanderbilt Avenue","employer":"Synkgen","email":"kleinkemp@synkgen.com","city":"Bonanza","state":"FL"} +{"index":{"_id":"962"}} +{"account_number":962,"balance":32096,"firstname":"Trujillo","lastname":"Wilcox","age":21,"gender":"F","address":"914 Duffield Street","employer":"Extragene","email":"trujillowilcox@extragene.com","city":"Golconda","state":"MA"} +{"index":{"_id":"967"}} 
+{"account_number":967,"balance":19161,"firstname":"Carrie","lastname":"Huffman","age":36,"gender":"F","address":"240 Sands Street","employer":"Injoy","email":"carriehuffman@injoy.com","city":"Leroy","state":"CA"} +{"index":{"_id":"974"}} +{"account_number":974,"balance":38082,"firstname":"Deborah","lastname":"Yang","age":26,"gender":"F","address":"463 Goodwin Place","employer":"Entogrok","email":"deborahyang@entogrok.com","city":"Herald","state":"KY"} +{"index":{"_id":"979"}} +{"account_number":979,"balance":43130,"firstname":"Vaughn","lastname":"Pittman","age":29,"gender":"M","address":"446 Tompkins Place","employer":"Phormula","email":"vaughnpittman@phormula.com","city":"Fingerville","state":"WI"} +{"index":{"_id":"981"}} +{"account_number":981,"balance":20278,"firstname":"Nolan","lastname":"Warner","age":29,"gender":"F","address":"753 Channel Avenue","employer":"Interodeo","email":"nolanwarner@interodeo.com","city":"Layhill","state":"MT"} +{"index":{"_id":"986"}} +{"account_number":986,"balance":35086,"firstname":"Norris","lastname":"Hubbard","age":31,"gender":"M","address":"600 Celeste Court","employer":"Printspan","email":"norrishubbard@printspan.com","city":"Cassel","state":"MI"} +{"index":{"_id":"993"}} +{"account_number":993,"balance":26487,"firstname":"Campos","lastname":"Olsen","age":37,"gender":"M","address":"873 Covert Street","employer":"Isbol","email":"camposolsen@isbol.com","city":"Glendale","state":"AK"} +{"index":{"_id":"998"}} +{"account_number":998,"balance":16869,"firstname":"Letha","lastname":"Baker","age":40,"gender":"F","address":"206 Llama Court","employer":"Dognosis","email":"lethabaker@dognosis.com","city":"Dunlo","state":"WV"} +{"index":{"_id":"2"}} +{"account_number":2,"balance":28838,"firstname":"Roberta","lastname":"Bender","age":22,"gender":"F","address":"560 Kingsway Place","employer":"Chillium","email":"robertabender@chillium.com","city":"Bennett","state":"LA"} +{"index":{"_id":"7"}} +{"account_number":7,"balance":39121,"firstname":"Levy","lastname":"Richard","age":22,"gender":"M","address":"820 Logan Street","employer":"Teraprene","email":"levyrichard@teraprene.com","city":"Shrewsbury","state":"MO"} +{"index":{"_id":"14"}} +{"account_number":14,"balance":20480,"firstname":"Erma","lastname":"Kane","age":39,"gender":"F","address":"661 Vista Place","employer":"Stockpost","email":"ermakane@stockpost.com","city":"Chamizal","state":"NY"} +{"index":{"_id":"19"}} +{"account_number":19,"balance":27894,"firstname":"Schwartz","lastname":"Buchanan","age":28,"gender":"F","address":"449 Mersereau Court","employer":"Sybixtex","email":"schwartzbuchanan@sybixtex.com","city":"Greenwich","state":"KS"} +{"index":{"_id":"21"}} +{"account_number":21,"balance":7004,"firstname":"Estella","lastname":"Paul","age":38,"gender":"M","address":"859 Portal Street","employer":"Zillatide","email":"estellapaul@zillatide.com","city":"Churchill","state":"WV"} +{"index":{"_id":"26"}} +{"account_number":26,"balance":14127,"firstname":"Lorraine","lastname":"Mccullough","age":39,"gender":"F","address":"157 Dupont Street","employer":"Zosis","email":"lorrainemccullough@zosis.com","city":"Dennard","state":"NH"} +{"index":{"_id":"33"}} +{"account_number":33,"balance":35439,"firstname":"Savannah","lastname":"Kirby","age":30,"gender":"F","address":"372 Malta Street","employer":"Musanpoly","email":"savannahkirby@musanpoly.com","city":"Muse","state":"AK"} +{"index":{"_id":"38"}} +{"account_number":38,"balance":10511,"firstname":"Erna","lastname":"Fields","age":32,"gender":"M","address":"357 Maple 
Street","employer":"Eweville","email":"ernafields@eweville.com","city":"Twilight","state":"MS"} +{"index":{"_id":"40"}} +{"account_number":40,"balance":33882,"firstname":"Pace","lastname":"Molina","age":40,"gender":"M","address":"263 Ovington Court","employer":"Cytrak","email":"pacemolina@cytrak.com","city":"Silkworth","state":"OR"} +{"index":{"_id":"45"}} +{"account_number":45,"balance":44478,"firstname":"Geneva","lastname":"Morin","age":21,"gender":"F","address":"357 Herkimer Street","employer":"Ezent","email":"genevamorin@ezent.com","city":"Blanco","state":"AZ"} +{"index":{"_id":"52"}} +{"account_number":52,"balance":46425,"firstname":"Kayla","lastname":"Bradshaw","age":31,"gender":"M","address":"449 Barlow Drive","employer":"Magnemo","email":"kaylabradshaw@magnemo.com","city":"Wawona","state":"AZ"} +{"index":{"_id":"57"}} +{"account_number":57,"balance":8705,"firstname":"Powell","lastname":"Herring","age":21,"gender":"M","address":"263 Merit Court","employer":"Digiprint","email":"powellherring@digiprint.com","city":"Coral","state":"MT"} +{"index":{"_id":"64"}} +{"account_number":64,"balance":44036,"firstname":"Miles","lastname":"Battle","age":35,"gender":"F","address":"988 Homecrest Avenue","employer":"Koffee","email":"milesbattle@koffee.com","city":"Motley","state":"ID"} +{"index":{"_id":"69"}} +{"account_number":69,"balance":14253,"firstname":"Desiree","lastname":"Harrison","age":24,"gender":"M","address":"694 Garland Court","employer":"Barkarama","email":"desireeharrison@barkarama.com","city":"Hackneyville","state":"GA"} +{"index":{"_id":"71"}} +{"account_number":71,"balance":38201,"firstname":"Sharpe","lastname":"Hoffman","age":39,"gender":"F","address":"450 Conklin Avenue","employer":"Centree","email":"sharpehoffman@centree.com","city":"Urbana","state":"WY"} +{"index":{"_id":"76"}} +{"account_number":76,"balance":38345,"firstname":"Claudette","lastname":"Beard","age":24,"gender":"F","address":"748 Dorset Street","employer":"Repetwire","email":"claudettebeard@repetwire.com","city":"Caln","state":"TX"} +{"index":{"_id":"83"}} +{"account_number":83,"balance":35928,"firstname":"Mayo","lastname":"Cleveland","age":28,"gender":"M","address":"720 Brooklyn Road","employer":"Indexia","email":"mayocleveland@indexia.com","city":"Roberts","state":"ND"} +{"index":{"_id":"88"}} +{"account_number":88,"balance":26418,"firstname":"Adela","lastname":"Tyler","age":21,"gender":"F","address":"737 Clove Road","employer":"Surelogic","email":"adelatyler@surelogic.com","city":"Boling","state":"SD"} +{"index":{"_id":"90"}} +{"account_number":90,"balance":25332,"firstname":"Herman","lastname":"Snyder","age":22,"gender":"F","address":"737 College Place","employer":"Lunchpod","email":"hermansnyder@lunchpod.com","city":"Flintville","state":"IA"} +{"index":{"_id":"95"}} +{"account_number":95,"balance":1650,"firstname":"Dominguez","lastname":"Le","age":20,"gender":"M","address":"539 Grace Court","employer":"Portica","email":"dominguezle@portica.com","city":"Wollochet","state":"KS"} +{"index":{"_id":"103"}} +{"account_number":103,"balance":11253,"firstname":"Calhoun","lastname":"Bruce","age":33,"gender":"F","address":"731 Clarkson Avenue","employer":"Automon","email":"calhounbruce@automon.com","city":"Marienthal","state":"IL"} +{"index":{"_id":"108"}} +{"account_number":108,"balance":19015,"firstname":"Christensen","lastname":"Weaver","age":21,"gender":"M","address":"398 Dearborn Court","employer":"Quilk","email":"christensenweaver@quilk.com","city":"Belvoir","state":"TX"} +{"index":{"_id":"110"}} 
+{"account_number":110,"balance":4850,"firstname":"Daphne","lastname":"Byrd","age":23,"gender":"F","address":"239 Conover Street","employer":"Freakin","email":"daphnebyrd@freakin.com","city":"Taft","state":"MN"} +{"index":{"_id":"115"}} +{"account_number":115,"balance":18750,"firstname":"Nikki","lastname":"Doyle","age":31,"gender":"F","address":"537 Clara Street","employer":"Fossiel","email":"nikkidoyle@fossiel.com","city":"Caron","state":"MS"} +{"index":{"_id":"122"}} +{"account_number":122,"balance":17128,"firstname":"Aurora","lastname":"Fry","age":31,"gender":"F","address":"227 Knapp Street","employer":"Makingway","email":"aurorafry@makingway.com","city":"Maybell","state":"NE"} +{"index":{"_id":"127"}} +{"account_number":127,"balance":48734,"firstname":"Diann","lastname":"Mclaughlin","age":33,"gender":"F","address":"340 Clermont Avenue","employer":"Enomen","email":"diannmclaughlin@enomen.com","city":"Rutherford","state":"ND"} +{"index":{"_id":"134"}} +{"account_number":134,"balance":33829,"firstname":"Madelyn","lastname":"Norris","age":30,"gender":"F","address":"176 Noel Avenue","employer":"Endicil","email":"madelynnorris@endicil.com","city":"Walker","state":"NE"} +{"index":{"_id":"139"}} +{"account_number":139,"balance":18444,"firstname":"Rios","lastname":"Todd","age":35,"gender":"F","address":"281 Georgia Avenue","employer":"Uberlux","email":"riostodd@uberlux.com","city":"Hannasville","state":"PA"} +{"index":{"_id":"141"}} +{"account_number":141,"balance":20790,"firstname":"Liliana","lastname":"Caldwell","age":29,"gender":"M","address":"414 Huron Street","employer":"Rubadub","email":"lilianacaldwell@rubadub.com","city":"Hiwasse","state":"OK"} +{"index":{"_id":"146"}} +{"account_number":146,"balance":39078,"firstname":"Lang","lastname":"Kaufman","age":32,"gender":"F","address":"626 Beverley Road","employer":"Rodeomad","email":"langkaufman@rodeomad.com","city":"Mahtowa","state":"RI"} +{"index":{"_id":"153"}} +{"account_number":153,"balance":32074,"firstname":"Bird","lastname":"Cochran","age":31,"gender":"F","address":"691 Bokee Court","employer":"Supremia","email":"birdcochran@supremia.com","city":"Barrelville","state":"NE"} +{"index":{"_id":"158"}} +{"account_number":158,"balance":9380,"firstname":"Natalie","lastname":"Mcdowell","age":27,"gender":"M","address":"953 Roder Avenue","employer":"Myopium","email":"nataliemcdowell@myopium.com","city":"Savage","state":"ND"} +{"index":{"_id":"160"}} +{"account_number":160,"balance":48974,"firstname":"Hull","lastname":"Cherry","age":23,"gender":"F","address":"275 Beaumont Street","employer":"Noralex","email":"hullcherry@noralex.com","city":"Whipholt","state":"WA"} +{"index":{"_id":"165"}} +{"account_number":165,"balance":18956,"firstname":"Sims","lastname":"Mckay","age":40,"gender":"F","address":"205 Jackson Street","employer":"Comtour","email":"simsmckay@comtour.com","city":"Tilden","state":"DC"} +{"index":{"_id":"172"}} +{"account_number":172,"balance":18356,"firstname":"Marie","lastname":"Whitehead","age":20,"gender":"M","address":"704 Monaco Place","employer":"Sultrax","email":"mariewhitehead@sultrax.com","city":"Dragoon","state":"IL"} +{"index":{"_id":"177"}} +{"account_number":177,"balance":48972,"firstname":"Harris","lastname":"Gross","age":40,"gender":"F","address":"468 Suydam Street","employer":"Kidstock","email":"harrisgross@kidstock.com","city":"Yettem","state":"KY"} +{"index":{"_id":"184"}} +{"account_number":184,"balance":9157,"firstname":"Cathy","lastname":"Morrison","age":27,"gender":"M","address":"882 Pine 
Street","employer":"Zytrek","email":"cathymorrison@zytrek.com","city":"Fedora","state":"FL"} +{"index":{"_id":"189"}} +{"account_number":189,"balance":20167,"firstname":"Ada","lastname":"Cortez","age":38,"gender":"F","address":"700 Forest Place","employer":"Micronaut","email":"adacortez@micronaut.com","city":"Eagletown","state":"TX"} +{"index":{"_id":"191"}} +{"account_number":191,"balance":26172,"firstname":"Barr","lastname":"Sharpe","age":28,"gender":"M","address":"428 Auburn Place","employer":"Ziggles","email":"barrsharpe@ziggles.com","city":"Springdale","state":"KS"} +{"index":{"_id":"196"}} +{"account_number":196,"balance":29931,"firstname":"Caldwell","lastname":"Daniel","age":28,"gender":"F","address":"405 Oliver Street","employer":"Furnigeer","email":"caldwelldaniel@furnigeer.com","city":"Zortman","state":"NE"} +{"index":{"_id":"204"}} +{"account_number":204,"balance":27714,"firstname":"Mavis","lastname":"Deleon","age":39,"gender":"F","address":"400 Waldane Court","employer":"Lotron","email":"mavisdeleon@lotron.com","city":"Stollings","state":"LA"} +{"index":{"_id":"209"}} +{"account_number":209,"balance":31052,"firstname":"Myers","lastname":"Noel","age":30,"gender":"F","address":"691 Alton Place","employer":"Greeker","email":"myersnoel@greeker.com","city":"Hinsdale","state":"KY"} +{"index":{"_id":"211"}} +{"account_number":211,"balance":21539,"firstname":"Graciela","lastname":"Vaughan","age":22,"gender":"M","address":"558 Montauk Court","employer":"Fishland","email":"gracielavaughan@fishland.com","city":"Madrid","state":"PA"} +{"index":{"_id":"216"}} +{"account_number":216,"balance":11422,"firstname":"Price","lastname":"Haley","age":35,"gender":"M","address":"233 Portland Avenue","employer":"Zeam","email":"pricehaley@zeam.com","city":"Titanic","state":"UT"} +{"index":{"_id":"223"}} +{"account_number":223,"balance":9528,"firstname":"Newton","lastname":"Fletcher","age":26,"gender":"F","address":"654 Dewitt Avenue","employer":"Assistia","email":"newtonfletcher@assistia.com","city":"Nipinnawasee","state":"AK"} +{"index":{"_id":"228"}} +{"account_number":228,"balance":10543,"firstname":"Rosella","lastname":"Albert","age":20,"gender":"M","address":"185 Gotham Avenue","employer":"Isoplex","email":"rosellaalbert@isoplex.com","city":"Finzel","state":"NY"} +{"index":{"_id":"230"}} +{"account_number":230,"balance":10829,"firstname":"Chris","lastname":"Raymond","age":28,"gender":"F","address":"464 Remsen Street","employer":"Cogentry","email":"chrisraymond@cogentry.com","city":"Bowmansville","state":"SD"} +{"index":{"_id":"235"}} +{"account_number":235,"balance":17729,"firstname":"Mcpherson","lastname":"Mueller","age":31,"gender":"M","address":"541 Strong Place","employer":"Tingles","email":"mcphersonmueller@tingles.com","city":"Brantleyville","state":"AR"} +{"index":{"_id":"242"}} +{"account_number":242,"balance":42318,"firstname":"Berger","lastname":"Roach","age":21,"gender":"M","address":"125 Wakeman Place","employer":"Ovium","email":"bergerroach@ovium.com","city":"Hessville","state":"WI"} +{"index":{"_id":"247"}} +{"account_number":247,"balance":45123,"firstname":"Mccormick","lastname":"Moon","age":37,"gender":"M","address":"582 Brighton Avenue","employer":"Norsup","email":"mccormickmoon@norsup.com","city":"Forestburg","state":"DE"} +{"index":{"_id":"254"}} +{"account_number":254,"balance":35104,"firstname":"Yang","lastname":"Dodson","age":21,"gender":"M","address":"531 Lott Street","employer":"Mondicil","email":"yangdodson@mondicil.com","city":"Enoree","state":"UT"} 
+{"index":{"_id":"259"}} +{"account_number":259,"balance":41877,"firstname":"Eleanor","lastname":"Gonzalez","age":30,"gender":"M","address":"800 Sumpter Street","employer":"Futuris","email":"eleanorgonzalez@futuris.com","city":"Jenkinsville","state":"ID"} +{"index":{"_id":"261"}} +{"account_number":261,"balance":39998,"firstname":"Millicent","lastname":"Pickett","age":34,"gender":"F","address":"722 Montieth Street","employer":"Gushkool","email":"millicentpickett@gushkool.com","city":"Norwood","state":"MS"} +{"index":{"_id":"266"}} +{"account_number":266,"balance":2777,"firstname":"Monique","lastname":"Conner","age":35,"gender":"F","address":"489 Metrotech Courtr","employer":"Flotonic","email":"moniqueconner@flotonic.com","city":"Retsof","state":"MD"} +{"index":{"_id":"273"}} +{"account_number":273,"balance":11181,"firstname":"Murphy","lastname":"Chandler","age":20,"gender":"F","address":"569 Bradford Street","employer":"Zilch","email":"murphychandler@zilch.com","city":"Vicksburg","state":"FL"} +{"index":{"_id":"278"}} +{"account_number":278,"balance":22530,"firstname":"Tamra","lastname":"Navarro","age":27,"gender":"F","address":"175 Woodruff Avenue","employer":"Norsul","email":"tamranavarro@norsul.com","city":"Glasgow","state":"VT"} +{"index":{"_id":"280"}} +{"account_number":280,"balance":3380,"firstname":"Vilma","lastname":"Shields","age":26,"gender":"F","address":"133 Berriman Street","employer":"Applidec","email":"vilmashields@applidec.com","city":"Adamstown","state":"ME"} +{"index":{"_id":"285"}} +{"account_number":285,"balance":47369,"firstname":"Hilda","lastname":"Phillips","age":28,"gender":"F","address":"618 Nixon Court","employer":"Comcur","email":"hildaphillips@comcur.com","city":"Siglerville","state":"NC"} +{"index":{"_id":"292"}} +{"account_number":292,"balance":26679,"firstname":"Morrow","lastname":"Greene","age":20,"gender":"F","address":"691 Nassau Street","employer":"Columella","email":"morrowgreene@columella.com","city":"Sanborn","state":"FL"} +{"index":{"_id":"297"}} +{"account_number":297,"balance":20508,"firstname":"Tucker","lastname":"Patrick","age":35,"gender":"F","address":"978 Whitwell Place","employer":"Valreda","email":"tuckerpatrick@valreda.com","city":"Deseret","state":"CO"} +{"index":{"_id":"300"}} +{"account_number":300,"balance":25654,"firstname":"Lane","lastname":"Tate","age":26,"gender":"F","address":"632 Kay Court","employer":"Genesynk","email":"lanetate@genesynk.com","city":"Lowell","state":"MO"} +{"index":{"_id":"305"}} +{"account_number":305,"balance":11655,"firstname":"Augusta","lastname":"Winters","age":29,"gender":"F","address":"377 Paerdegat Avenue","employer":"Vendblend","email":"augustawinters@vendblend.com","city":"Gwynn","state":"MA"} +{"index":{"_id":"312"}} +{"account_number":312,"balance":8511,"firstname":"Burgess","lastname":"Gentry","age":25,"gender":"F","address":"382 Bergen Court","employer":"Orbixtar","email":"burgessgentry@orbixtar.com","city":"Conestoga","state":"WI"} +{"index":{"_id":"317"}} +{"account_number":317,"balance":31968,"firstname":"Ruiz","lastname":"Morris","age":31,"gender":"F","address":"972 Dean Street","employer":"Apex","email":"ruizmorris@apex.com","city":"Jacksonwald","state":"WV"} +{"index":{"_id":"324"}} +{"account_number":324,"balance":44976,"firstname":"Gladys","lastname":"Erickson","age":22,"gender":"M","address":"250 Battery Avenue","employer":"Eternis","email":"gladyserickson@eternis.com","city":"Marne","state":"IA"} +{"index":{"_id":"329"}} 
+{"account_number":329,"balance":31138,"firstname":"Nellie","lastname":"Mercer","age":25,"gender":"M","address":"967 Ebony Court","employer":"Scenty","email":"nelliemercer@scenty.com","city":"Jardine","state":"AK"} +{"index":{"_id":"331"}} +{"account_number":331,"balance":46004,"firstname":"Gibson","lastname":"Potts","age":34,"gender":"F","address":"994 Dahill Road","employer":"Zensus","email":"gibsonpotts@zensus.com","city":"Frizzleburg","state":"CO"} +{"index":{"_id":"336"}} +{"account_number":336,"balance":40891,"firstname":"Dudley","lastname":"Avery","age":25,"gender":"M","address":"405 Powers Street","employer":"Genmom","email":"dudleyavery@genmom.com","city":"Clarksburg","state":"CO"} +{"index":{"_id":"343"}} +{"account_number":343,"balance":37684,"firstname":"Robbie","lastname":"Logan","age":29,"gender":"M","address":"488 Linden Boulevard","employer":"Hydrocom","email":"robbielogan@hydrocom.com","city":"Stockdale","state":"TN"} +{"index":{"_id":"348"}} +{"account_number":348,"balance":1360,"firstname":"Karina","lastname":"Russell","age":37,"gender":"M","address":"797 Moffat Street","employer":"Limozen","email":"karinarussell@limozen.com","city":"Riegelwood","state":"RI"} +{"index":{"_id":"350"}} +{"account_number":350,"balance":4267,"firstname":"Wyatt","lastname":"Wise","age":22,"gender":"F","address":"896 Bleecker Street","employer":"Rockyard","email":"wyattwise@rockyard.com","city":"Joes","state":"MS"} +{"index":{"_id":"355"}} +{"account_number":355,"balance":40961,"firstname":"Gregory","lastname":"Delacruz","age":38,"gender":"M","address":"876 Cortelyou Road","employer":"Oulu","email":"gregorydelacruz@oulu.com","city":"Waterloo","state":"WV"} +{"index":{"_id":"362"}} +{"account_number":362,"balance":14938,"firstname":"Jimmie","lastname":"Dejesus","age":26,"gender":"M","address":"351 Navy Walk","employer":"Ecolight","email":"jimmiedejesus@ecolight.com","city":"Berlin","state":"ME"} +{"index":{"_id":"367"}} +{"account_number":367,"balance":40458,"firstname":"Elaine","lastname":"Workman","age":20,"gender":"M","address":"188 Ridge Boulevard","employer":"Colaire","email":"elaineworkman@colaire.com","city":"Herbster","state":"AK"} +{"index":{"_id":"374"}} +{"account_number":374,"balance":19521,"firstname":"Blanchard","lastname":"Stein","age":30,"gender":"M","address":"313 Bartlett Street","employer":"Cujo","email":"blanchardstein@cujo.com","city":"Cascades","state":"OR"} +{"index":{"_id":"379"}} +{"account_number":379,"balance":12962,"firstname":"Ruthie","lastname":"Lamb","age":21,"gender":"M","address":"796 Rockaway Avenue","employer":"Incubus","email":"ruthielamb@incubus.com","city":"Hickory","state":"TX"} +{"index":{"_id":"381"}} +{"account_number":381,"balance":40978,"firstname":"Sophie","lastname":"Mays","age":31,"gender":"M","address":"261 Varanda Place","employer":"Uneeq","email":"sophiemays@uneeq.com","city":"Cressey","state":"AR"} +{"index":{"_id":"386"}} +{"account_number":386,"balance":42588,"firstname":"Wallace","lastname":"Barr","age":39,"gender":"F","address":"246 Beverly Road","employer":"Concility","email":"wallacebarr@concility.com","city":"Durham","state":"IN"} +{"index":{"_id":"393"}} +{"account_number":393,"balance":43936,"firstname":"William","lastname":"Kelly","age":24,"gender":"M","address":"178 Lawrence Avenue","employer":"Techtrix","email":"williamkelly@techtrix.com","city":"Orin","state":"PA"} +{"index":{"_id":"398"}} +{"account_number":398,"balance":8543,"firstname":"Leticia","lastname":"Duran","age":35,"gender":"F","address":"305 Senator 
Street","employer":"Xleen","email":"leticiaduran@xleen.com","city":"Cavalero","state":"PA"} +{"index":{"_id":"401"}} +{"account_number":401,"balance":29408,"firstname":"Contreras","lastname":"Randolph","age":38,"gender":"M","address":"104 Lewis Avenue","employer":"Inrt","email":"contrerasrandolph@inrt.com","city":"Chesapeake","state":"CT"} +{"index":{"_id":"406"}} +{"account_number":406,"balance":28127,"firstname":"Mccarthy","lastname":"Dunlap","age":28,"gender":"F","address":"684 Seacoast Terrace","employer":"Canopoly","email":"mccarthydunlap@canopoly.com","city":"Elliott","state":"NC"} +{"index":{"_id":"413"}} +{"account_number":413,"balance":15631,"firstname":"Pugh","lastname":"Hamilton","age":39,"gender":"F","address":"124 Euclid Avenue","employer":"Techade","email":"pughhamilton@techade.com","city":"Beaulieu","state":"CA"} +{"index":{"_id":"418"}} +{"account_number":418,"balance":10207,"firstname":"Reed","lastname":"Goff","age":32,"gender":"M","address":"959 Everit Street","employer":"Zillan","email":"reedgoff@zillan.com","city":"Hiko","state":"WV"} +{"index":{"_id":"420"}} +{"account_number":420,"balance":44699,"firstname":"Brandie","lastname":"Hayden","age":22,"gender":"M","address":"291 Ash Street","employer":"Digifad","email":"brandiehayden@digifad.com","city":"Spelter","state":"NM"} +{"index":{"_id":"425"}} +{"account_number":425,"balance":41308,"firstname":"Queen","lastname":"Leach","age":30,"gender":"M","address":"105 Fair Street","employer":"Magneato","email":"queenleach@magneato.com","city":"Barronett","state":"NH"} +{"index":{"_id":"432"}} +{"account_number":432,"balance":28969,"firstname":"Preston","lastname":"Ferguson","age":40,"gender":"F","address":"239 Greenwood Avenue","employer":"Bitendrex","email":"prestonferguson@bitendrex.com","city":"Idledale","state":"ND"} +{"index":{"_id":"437"}} +{"account_number":437,"balance":41225,"firstname":"Rosales","lastname":"Marquez","age":29,"gender":"M","address":"873 Ryerson Street","employer":"Ronelon","email":"rosalesmarquez@ronelon.com","city":"Allendale","state":"CA"} +{"index":{"_id":"444"}} +{"account_number":444,"balance":44219,"firstname":"Dolly","lastname":"Finch","age":24,"gender":"F","address":"974 Interborough Parkway","employer":"Zytrac","email":"dollyfinch@zytrac.com","city":"Vowinckel","state":"WY"} +{"index":{"_id":"449"}} +{"account_number":449,"balance":41950,"firstname":"Barnett","lastname":"Cantrell","age":39,"gender":"F","address":"945 Bedell Lane","employer":"Zentility","email":"barnettcantrell@zentility.com","city":"Swartzville","state":"ND"} +{"index":{"_id":"451"}} +{"account_number":451,"balance":31950,"firstname":"Mason","lastname":"Mcleod","age":31,"gender":"F","address":"438 Havemeyer Street","employer":"Omatom","email":"masonmcleod@omatom.com","city":"Ryderwood","state":"NE"} +{"index":{"_id":"456"}} +{"account_number":456,"balance":21419,"firstname":"Solis","lastname":"Kline","age":33,"gender":"M","address":"818 Ashford Street","employer":"Vetron","email":"soliskline@vetron.com","city":"Ruffin","state":"NY"} +{"index":{"_id":"463"}} +{"account_number":463,"balance":36672,"firstname":"Heidi","lastname":"Acosta","age":20,"gender":"F","address":"692 Kenmore Terrace","employer":"Elpro","email":"heidiacosta@elpro.com","city":"Ezel","state":"SD"} +{"index":{"_id":"468"}} +{"account_number":468,"balance":18400,"firstname":"Foreman","lastname":"Fowler","age":40,"gender":"M","address":"443 Jackson Court","employer":"Zillactic","email":"foremanfowler@zillactic.com","city":"Wakarusa","state":"WA"} 
+{"index":{"_id":"470"}} +{"account_number":470,"balance":20455,"firstname":"Schneider","lastname":"Hull","age":35,"gender":"M","address":"724 Apollo Street","employer":"Exospeed","email":"schneiderhull@exospeed.com","city":"Watchtower","state":"ID"} +{"index":{"_id":"475"}} +{"account_number":475,"balance":24427,"firstname":"Morales","lastname":"Jacobs","age":22,"gender":"F","address":"225 Desmond Court","employer":"Oronoko","email":"moralesjacobs@oronoko.com","city":"Clayville","state":"CT"} +{"index":{"_id":"482"}} +{"account_number":482,"balance":14834,"firstname":"Janie","lastname":"Bass","age":39,"gender":"M","address":"781 Grattan Street","employer":"Manglo","email":"janiebass@manglo.com","city":"Kenwood","state":"IA"} +{"index":{"_id":"487"}} +{"account_number":487,"balance":30718,"firstname":"Sawyer","lastname":"Vincent","age":26,"gender":"F","address":"238 Lancaster Avenue","employer":"Brainquil","email":"sawyervincent@brainquil.com","city":"Galesville","state":"MS"} +{"index":{"_id":"494"}} +{"account_number":494,"balance":3592,"firstname":"Holden","lastname":"Bowen","age":30,"gender":"M","address":"374 Elmwood Avenue","employer":"Endipine","email":"holdenbowen@endipine.com","city":"Rosine","state":"ID"} +{"index":{"_id":"499"}} +{"account_number":499,"balance":26060,"firstname":"Lara","lastname":"Perkins","age":26,"gender":"M","address":"703 Monroe Street","employer":"Paprikut","email":"laraperkins@paprikut.com","city":"Barstow","state":"NY"} +{"index":{"_id":"502"}} +{"account_number":502,"balance":31898,"firstname":"Woodard","lastname":"Bailey","age":31,"gender":"F","address":"585 Albee Square","employer":"Imperium","email":"woodardbailey@imperium.com","city":"Matheny","state":"MT"} +{"index":{"_id":"507"}} +{"account_number":507,"balance":27675,"firstname":"Blankenship","lastname":"Ramirez","age":31,"gender":"M","address":"630 Graham Avenue","employer":"Bytrex","email":"blankenshipramirez@bytrex.com","city":"Bancroft","state":"CT"} +{"index":{"_id":"514"}} +{"account_number":514,"balance":30125,"firstname":"Solomon","lastname":"Bush","age":34,"gender":"M","address":"409 Harkness Avenue","employer":"Snacktion","email":"solomonbush@snacktion.com","city":"Grayhawk","state":"TX"} +{"index":{"_id":"519"}} +{"account_number":519,"balance":3282,"firstname":"Lorna","lastname":"Franco","age":31,"gender":"F","address":"722 Schenck Court","employer":"Zentia","email":"lornafranco@zentia.com","city":"National","state":"FL"} +{"index":{"_id":"521"}} +{"account_number":521,"balance":16348,"firstname":"Josefa","lastname":"Buckley","age":34,"gender":"F","address":"848 Taylor Street","employer":"Mazuda","email":"josefabuckley@mazuda.com","city":"Saranap","state":"NM"} +{"index":{"_id":"526"}} +{"account_number":526,"balance":35375,"firstname":"Sweeney","lastname":"Fulton","age":33,"gender":"F","address":"550 Martense Street","employer":"Cormoran","email":"sweeneyfulton@cormoran.com","city":"Chalfant","state":"IA"} +{"index":{"_id":"533"}} +{"account_number":533,"balance":13761,"firstname":"Margarita","lastname":"Diaz","age":23,"gender":"M","address":"295 Tapscott Street","employer":"Zilodyne","email":"margaritadiaz@zilodyne.com","city":"Hondah","state":"ID"} +{"index":{"_id":"538"}} +{"account_number":538,"balance":16416,"firstname":"Koch","lastname":"Barker","age":21,"gender":"M","address":"919 Gerry Street","employer":"Xplor","email":"kochbarker@xplor.com","city":"Dixie","state":"WY"} +{"index":{"_id":"540"}} 
+{"account_number":540,"balance":40235,"firstname":"Tammy","lastname":"Wiggins","age":32,"gender":"F","address":"186 Schenectady Avenue","employer":"Speedbolt","email":"tammywiggins@speedbolt.com","city":"Salvo","state":"LA"} +{"index":{"_id":"545"}} +{"account_number":545,"balance":27011,"firstname":"Lena","lastname":"Lucas","age":20,"gender":"M","address":"110 Lamont Court","employer":"Kindaloo","email":"lenalucas@kindaloo.com","city":"Harleigh","state":"KY"} +{"index":{"_id":"552"}} +{"account_number":552,"balance":14727,"firstname":"Kate","lastname":"Estes","age":39,"gender":"M","address":"785 Willmohr Street","employer":"Rodeocean","email":"kateestes@rodeocean.com","city":"Elfrida","state":"HI"} +{"index":{"_id":"557"}} +{"account_number":557,"balance":3119,"firstname":"Landry","lastname":"Buck","age":20,"gender":"M","address":"558 Schweikerts Walk","employer":"Protodyne","email":"landrybuck@protodyne.com","city":"Edneyville","state":"AL"} +{"index":{"_id":"564"}} +{"account_number":564,"balance":43631,"firstname":"Owens","lastname":"Bowers","age":22,"gender":"M","address":"842 Congress Street","employer":"Nspire","email":"owensbowers@nspire.com","city":"Machias","state":"VA"} +{"index":{"_id":"569"}} +{"account_number":569,"balance":40019,"firstname":"Sherri","lastname":"Rowe","age":39,"gender":"F","address":"591 Arlington Place","employer":"Netility","email":"sherrirowe@netility.com","city":"Bridgetown","state":"SC"} +{"index":{"_id":"571"}} +{"account_number":571,"balance":3014,"firstname":"Ayers","lastname":"Duffy","age":28,"gender":"F","address":"721 Wortman Avenue","employer":"Aquasseur","email":"ayersduffy@aquasseur.com","city":"Tilleda","state":"MS"} +{"index":{"_id":"576"}} +{"account_number":576,"balance":29682,"firstname":"Helena","lastname":"Robertson","age":33,"gender":"F","address":"774 Devon Avenue","employer":"Vicon","email":"helenarobertson@vicon.com","city":"Dyckesville","state":"NV"} +{"index":{"_id":"583"}} +{"account_number":583,"balance":26558,"firstname":"Castro","lastname":"West","age":34,"gender":"F","address":"814 Williams Avenue","employer":"Cipromox","email":"castrowest@cipromox.com","city":"Nescatunga","state":"IL"} +{"index":{"_id":"588"}} +{"account_number":588,"balance":43531,"firstname":"Martina","lastname":"Collins","age":31,"gender":"M","address":"301 Anna Court","employer":"Geekwagon","email":"martinacollins@geekwagon.com","city":"Oneida","state":"VA"} +{"index":{"_id":"590"}} +{"account_number":590,"balance":4652,"firstname":"Ladonna","lastname":"Tucker","age":31,"gender":"F","address":"162 Kane Place","employer":"Infotrips","email":"ladonnatucker@infotrips.com","city":"Utting","state":"IA"} +{"index":{"_id":"595"}} +{"account_number":595,"balance":12478,"firstname":"Mccall","lastname":"Britt","age":36,"gender":"F","address":"823 Hill Street","employer":"Cablam","email":"mccallbritt@cablam.com","city":"Vernon","state":"CA"} +{"index":{"_id":"603"}} +{"account_number":603,"balance":28145,"firstname":"Janette","lastname":"Guzman","age":31,"gender":"F","address":"976 Kingston Avenue","employer":"Splinx","email":"janetteguzman@splinx.com","city":"Boomer","state":"NC"} +{"index":{"_id":"608"}} +{"account_number":608,"balance":47091,"firstname":"Carey","lastname":"Whitley","age":32,"gender":"F","address":"976 Lawrence Street","employer":"Poshome","email":"careywhitley@poshome.com","city":"Weogufka","state":"NE"} +{"index":{"_id":"610"}} +{"account_number":610,"balance":40571,"firstname":"Foster","lastname":"Weber","age":24,"gender":"F","address":"323 
Rochester Avenue","employer":"Firewax","email":"fosterweber@firewax.com","city":"Winston","state":"NY"} +{"index":{"_id":"615"}} +{"account_number":615,"balance":28726,"firstname":"Delgado","lastname":"Curry","age":28,"gender":"F","address":"706 Butler Street","employer":"Zoxy","email":"delgadocurry@zoxy.com","city":"Gracey","state":"SD"} +{"index":{"_id":"622"}} +{"account_number":622,"balance":9661,"firstname":"Paulette","lastname":"Hartman","age":38,"gender":"M","address":"375 Emerald Street","employer":"Locazone","email":"paulettehartman@locazone.com","city":"Canterwood","state":"OH"} +{"index":{"_id":"627"}} +{"account_number":627,"balance":47546,"firstname":"Crawford","lastname":"Sears","age":37,"gender":"F","address":"686 Eastern Parkway","employer":"Updat","email":"crawfordsears@updat.com","city":"Bison","state":"VT"} +{"index":{"_id":"634"}} +{"account_number":634,"balance":29805,"firstname":"Deloris","lastname":"Levy","age":38,"gender":"M","address":"838 Foster Avenue","employer":"Homelux","email":"delorislevy@homelux.com","city":"Kempton","state":"PA"} +{"index":{"_id":"639"}} +{"account_number":639,"balance":28875,"firstname":"Caitlin","lastname":"Clements","age":32,"gender":"F","address":"627 Aster Court","employer":"Bunga","email":"caitlinclements@bunga.com","city":"Cetronia","state":"SC"} +{"index":{"_id":"641"}} +{"account_number":641,"balance":18345,"firstname":"Sheppard","lastname":"Everett","age":39,"gender":"F","address":"791 Norwood Avenue","employer":"Roboid","email":"sheppardeverett@roboid.com","city":"Selma","state":"AK"} +{"index":{"_id":"646"}} +{"account_number":646,"balance":15559,"firstname":"Lavonne","lastname":"Reyes","age":31,"gender":"F","address":"983 Newport Street","employer":"Parcoe","email":"lavonnereyes@parcoe.com","city":"Monument","state":"LA"} +{"index":{"_id":"653"}} +{"account_number":653,"balance":7606,"firstname":"Marcia","lastname":"Bennett","age":33,"gender":"F","address":"455 Bragg Street","employer":"Opticall","email":"marciabennett@opticall.com","city":"Magnolia","state":"NC"} +{"index":{"_id":"658"}} +{"account_number":658,"balance":10210,"firstname":"Bass","lastname":"Mcconnell","age":32,"gender":"F","address":"274 Ocean Avenue","employer":"Combot","email":"bassmcconnell@combot.com","city":"Beyerville","state":"OH"} +{"index":{"_id":"660"}} +{"account_number":660,"balance":46427,"firstname":"Moon","lastname":"Wood","age":33,"gender":"F","address":"916 Amersfort Place","employer":"Olucore","email":"moonwood@olucore.com","city":"Como","state":"VA"} +{"index":{"_id":"665"}} +{"account_number":665,"balance":15215,"firstname":"Britney","lastname":"Young","age":36,"gender":"M","address":"766 Sackman Street","employer":"Geoforma","email":"britneyyoung@geoforma.com","city":"Tuttle","state":"WI"} +{"index":{"_id":"672"}} +{"account_number":672,"balance":12621,"firstname":"Camille","lastname":"Munoz","age":36,"gender":"F","address":"959 Lewis Place","employer":"Vantage","email":"camillemunoz@vantage.com","city":"Whitmer","state":"IN"} +{"index":{"_id":"677"}} +{"account_number":677,"balance":8491,"firstname":"Snider","lastname":"Benton","age":26,"gender":"M","address":"827 Evans Street","employer":"Medicroix","email":"sniderbenton@medicroix.com","city":"Kaka","state":"UT"} +{"index":{"_id":"684"}} +{"account_number":684,"balance":46091,"firstname":"Warren","lastname":"Snow","age":25,"gender":"M","address":"756 Oakland Place","employer":"Bizmatic","email":"warrensnow@bizmatic.com","city":"Hatteras","state":"NE"} +{"index":{"_id":"689"}} 
+{"account_number":689,"balance":14985,"firstname":"Ines","lastname":"Chaney","age":28,"gender":"M","address":"137 Dikeman Street","employer":"Zidant","email":"ineschaney@zidant.com","city":"Nettie","state":"DC"} +{"index":{"_id":"691"}} +{"account_number":691,"balance":10792,"firstname":"Mclean","lastname":"Colon","age":22,"gender":"M","address":"876 Classon Avenue","employer":"Elentrix","email":"mcleancolon@elentrix.com","city":"Unionville","state":"OK"} +{"index":{"_id":"696"}} +{"account_number":696,"balance":17568,"firstname":"Crane","lastname":"Matthews","age":32,"gender":"F","address":"721 Gerritsen Avenue","employer":"Intradisk","email":"cranematthews@intradisk.com","city":"Brewster","state":"WV"} +{"index":{"_id":"704"}} +{"account_number":704,"balance":45347,"firstname":"Peters","lastname":"Kent","age":22,"gender":"F","address":"871 Independence Avenue","employer":"Extragen","email":"peterskent@extragen.com","city":"Morriston","state":"CA"} +{"index":{"_id":"709"}} +{"account_number":709,"balance":11015,"firstname":"Abbott","lastname":"Odom","age":29,"gender":"M","address":"893 Union Street","employer":"Jimbies","email":"abbottodom@jimbies.com","city":"Leeper","state":"NJ"} +{"index":{"_id":"711"}} +{"account_number":711,"balance":26939,"firstname":"Villarreal","lastname":"Horton","age":35,"gender":"F","address":"861 Creamer Street","employer":"Lexicondo","email":"villarrealhorton@lexicondo.com","city":"Lydia","state":"MS"} +{"index":{"_id":"716"}} +{"account_number":716,"balance":19789,"firstname":"Paul","lastname":"Mason","age":34,"gender":"F","address":"618 Nichols Avenue","employer":"Slax","email":"paulmason@slax.com","city":"Snowville","state":"OK"} +{"index":{"_id":"723"}} +{"account_number":723,"balance":16421,"firstname":"Nixon","lastname":"Moran","age":27,"gender":"M","address":"569 Campus Place","employer":"Cuizine","email":"nixonmoran@cuizine.com","city":"Buxton","state":"DC"} +{"index":{"_id":"728"}} +{"account_number":728,"balance":44818,"firstname":"Conley","lastname":"Preston","age":28,"gender":"M","address":"450 Coventry Road","employer":"Obones","email":"conleypreston@obones.com","city":"Alden","state":"CO"} +{"index":{"_id":"730"}} +{"account_number":730,"balance":41299,"firstname":"Moore","lastname":"Lee","age":30,"gender":"M","address":"797 Turner Place","employer":"Orbean","email":"moorelee@orbean.com","city":"Highland","state":"DE"} +{"index":{"_id":"735"}} +{"account_number":735,"balance":3984,"firstname":"Loraine","lastname":"Willis","age":32,"gender":"F","address":"928 Grove Street","employer":"Gadtron","email":"lorainewillis@gadtron.com","city":"Lowgap","state":"NY"} +{"index":{"_id":"742"}} +{"account_number":742,"balance":24765,"firstname":"Merle","lastname":"Wooten","age":26,"gender":"M","address":"317 Pooles Lane","employer":"Tropolis","email":"merlewooten@tropolis.com","city":"Bentley","state":"ND"} +{"index":{"_id":"747"}} +{"account_number":747,"balance":16617,"firstname":"Diaz","lastname":"Austin","age":38,"gender":"M","address":"676 Harway Avenue","employer":"Irack","email":"diazaustin@irack.com","city":"Cliff","state":"HI"} +{"index":{"_id":"754"}} +{"account_number":754,"balance":10779,"firstname":"Jones","lastname":"Vega","age":25,"gender":"F","address":"795 India Street","employer":"Gluid","email":"jonesvega@gluid.com","city":"Tyhee","state":"FL"} +{"index":{"_id":"759"}} +{"account_number":759,"balance":38007,"firstname":"Rose","lastname":"Carlson","age":27,"gender":"M","address":"987 Navy 
Street","employer":"Aquasure","email":"rosecarlson@aquasure.com","city":"Carlton","state":"CT"} +{"index":{"_id":"761"}} +{"account_number":761,"balance":7663,"firstname":"Rae","lastname":"Juarez","age":34,"gender":"F","address":"560 Gilmore Court","employer":"Entropix","email":"raejuarez@entropix.com","city":"Northchase","state":"ID"} +{"index":{"_id":"766"}} +{"account_number":766,"balance":21957,"firstname":"Thomas","lastname":"Gillespie","age":38,"gender":"M","address":"993 Williams Place","employer":"Octocore","email":"thomasgillespie@octocore.com","city":"Defiance","state":"MS"} +{"index":{"_id":"773"}} +{"account_number":773,"balance":31126,"firstname":"Liza","lastname":"Coffey","age":36,"gender":"F","address":"540 Bulwer Place","employer":"Assurity","email":"lizacoffey@assurity.com","city":"Gilgo","state":"WV"} +{"index":{"_id":"778"}} +{"account_number":778,"balance":46007,"firstname":"Underwood","lastname":"Wheeler","age":28,"gender":"M","address":"477 Provost Street","employer":"Decratex","email":"underwoodwheeler@decratex.com","city":"Sardis","state":"ID"} +{"index":{"_id":"780"}} +{"account_number":780,"balance":4682,"firstname":"Maryanne","lastname":"Hendricks","age":26,"gender":"F","address":"709 Wolcott Street","employer":"Sarasonic","email":"maryannehendricks@sarasonic.com","city":"Santel","state":"NH"} +{"index":{"_id":"785"}} +{"account_number":785,"balance":25078,"firstname":"Fields","lastname":"Lester","age":29,"gender":"M","address":"808 Chestnut Avenue","employer":"Visualix","email":"fieldslester@visualix.com","city":"Rowe","state":"PA"} +{"index":{"_id":"792"}} +{"account_number":792,"balance":13109,"firstname":"Becky","lastname":"Jimenez","age":40,"gender":"F","address":"539 Front Street","employer":"Isologia","email":"beckyjimenez@isologia.com","city":"Summertown","state":"MI"} +{"index":{"_id":"797"}} +{"account_number":797,"balance":6854,"firstname":"Lindsay","lastname":"Mills","age":26,"gender":"F","address":"919 Quay Street","employer":"Zoinage","email":"lindsaymills@zoinage.com","city":"Elliston","state":"VA"} +{"index":{"_id":"800"}} +{"account_number":800,"balance":26217,"firstname":"Candy","lastname":"Oconnor","age":28,"gender":"M","address":"200 Newel Street","employer":"Radiantix","email":"candyoconnor@radiantix.com","city":"Sandston","state":"OH"} +{"index":{"_id":"805"}} +{"account_number":805,"balance":18426,"firstname":"Jackson","lastname":"Sampson","age":27,"gender":"F","address":"722 Kenmore Court","employer":"Daido","email":"jacksonsampson@daido.com","city":"Bellamy","state":"ME"} +{"index":{"_id":"812"}} +{"account_number":812,"balance":42593,"firstname":"Graves","lastname":"Newman","age":32,"gender":"F","address":"916 Joralemon Street","employer":"Ecrater","email":"gravesnewman@ecrater.com","city":"Crown","state":"PA"} +{"index":{"_id":"817"}} +{"account_number":817,"balance":36582,"firstname":"Padilla","lastname":"Bauer","age":36,"gender":"F","address":"310 Cadman Plaza","employer":"Exoblue","email":"padillabauer@exoblue.com","city":"Ahwahnee","state":"MN"} +{"index":{"_id":"824"}} +{"account_number":824,"balance":6053,"firstname":"Dyer","lastname":"Henson","age":33,"gender":"M","address":"650 Seaview Avenue","employer":"Nitracyr","email":"dyerhenson@nitracyr.com","city":"Gibsonia","state":"KS"} +{"index":{"_id":"829"}} +{"account_number":829,"balance":20263,"firstname":"Althea","lastname":"Bell","age":37,"gender":"M","address":"319 Cook Street","employer":"Hyplex","email":"altheabell@hyplex.com","city":"Wadsworth","state":"DC"} 
+{"index":{"_id":"831"}} +{"account_number":831,"balance":25375,"firstname":"Wendy","lastname":"Savage","age":37,"gender":"M","address":"421 Veranda Place","employer":"Neurocell","email":"wendysavage@neurocell.com","city":"Fresno","state":"MS"} +{"index":{"_id":"836"}} +{"account_number":836,"balance":20797,"firstname":"Lloyd","lastname":"Lindsay","age":25,"gender":"F","address":"953 Dinsmore Place","employer":"Suretech","email":"lloydlindsay@suretech.com","city":"Conway","state":"VA"} +{"index":{"_id":"843"}} +{"account_number":843,"balance":15555,"firstname":"Patricia","lastname":"Barton","age":34,"gender":"F","address":"406 Seabring Street","employer":"Providco","email":"patriciabarton@providco.com","city":"Avoca","state":"RI"} +{"index":{"_id":"848"}} +{"account_number":848,"balance":15443,"firstname":"Carmella","lastname":"Cash","age":38,"gender":"M","address":"988 Exeter Street","employer":"Bristo","email":"carmellacash@bristo.com","city":"Northridge","state":"ID"} +{"index":{"_id":"850"}} +{"account_number":850,"balance":6531,"firstname":"Carlene","lastname":"Gaines","age":37,"gender":"F","address":"753 Monroe Place","employer":"Naxdis","email":"carlenegaines@naxdis.com","city":"Genoa","state":"OR"} +{"index":{"_id":"855"}} +{"account_number":855,"balance":40170,"firstname":"Mia","lastname":"Stevens","age":31,"gender":"F","address":"326 Driggs Avenue","employer":"Aeora","email":"miastevens@aeora.com","city":"Delwood","state":"IL"} +{"index":{"_id":"862"}} +{"account_number":862,"balance":38792,"firstname":"Clayton","lastname":"Golden","age":38,"gender":"F","address":"620 Regent Place","employer":"Accusage","email":"claytongolden@accusage.com","city":"Ona","state":"NC"} +{"index":{"_id":"867"}} +{"account_number":867,"balance":45453,"firstname":"Blanca","lastname":"Ellison","age":23,"gender":"F","address":"593 McKibben Street","employer":"Koogle","email":"blancaellison@koogle.com","city":"Frystown","state":"WY"} +{"index":{"_id":"874"}} +{"account_number":874,"balance":23079,"firstname":"Lynette","lastname":"Higgins","age":22,"gender":"M","address":"377 McKinley Avenue","employer":"Menbrain","email":"lynettehiggins@menbrain.com","city":"Manitou","state":"TX"} +{"index":{"_id":"879"}} +{"account_number":879,"balance":48332,"firstname":"Sabrina","lastname":"Lancaster","age":31,"gender":"F","address":"382 Oak Street","employer":"Webiotic","email":"sabrinalancaster@webiotic.com","city":"Lindisfarne","state":"AZ"} +{"index":{"_id":"881"}} +{"account_number":881,"balance":26684,"firstname":"Barnes","lastname":"Ware","age":38,"gender":"F","address":"666 Hooper Street","employer":"Norali","email":"barnesware@norali.com","city":"Cazadero","state":"GA"} +{"index":{"_id":"886"}} +{"account_number":886,"balance":14867,"firstname":"Willa","lastname":"Leblanc","age":38,"gender":"F","address":"773 Bergen Street","employer":"Nurali","email":"willaleblanc@nurali.com","city":"Hilltop","state":"NC"} +{"index":{"_id":"893"}} +{"account_number":893,"balance":42584,"firstname":"Moses","lastname":"Campos","age":38,"gender":"F","address":"991 Bevy Court","employer":"Trollery","email":"mosescampos@trollery.com","city":"Freetown","state":"AK"} +{"index":{"_id":"898"}} +{"account_number":898,"balance":12019,"firstname":"Lori","lastname":"Stevenson","age":29,"gender":"M","address":"910 Coles Street","employer":"Honotron","email":"loristevenson@honotron.com","city":"Shindler","state":"VT"} +{"index":{"_id":"901"}} 
+{"account_number":901,"balance":35038,"firstname":"Irma","lastname":"Dotson","age":23,"gender":"F","address":"245 Mayfair Drive","employer":"Bleeko","email":"irmadotson@bleeko.com","city":"Lodoga","state":"UT"} +{"index":{"_id":"906"}} +{"account_number":906,"balance":24073,"firstname":"Vicki","lastname":"Suarez","age":36,"gender":"M","address":"829 Roosevelt Place","employer":"Utara","email":"vickisuarez@utara.com","city":"Albrightsville","state":"AR"} +{"index":{"_id":"913"}} +{"account_number":913,"balance":47657,"firstname":"Margery","lastname":"Monroe","age":25,"gender":"M","address":"941 Fanchon Place","employer":"Exerta","email":"margerymonroe@exerta.com","city":"Bannock","state":"MD"} +{"index":{"_id":"918"}} +{"account_number":918,"balance":36776,"firstname":"Dianna","lastname":"Hernandez","age":25,"gender":"M","address":"499 Moultrie Street","employer":"Isologica","email":"diannahernandez@isologica.com","city":"Falconaire","state":"ID"} +{"index":{"_id":"920"}} +{"account_number":920,"balance":41513,"firstname":"Jerri","lastname":"Mitchell","age":26,"gender":"M","address":"831 Kent Street","employer":"Tasmania","email":"jerrimitchell@tasmania.com","city":"Cotopaxi","state":"IA"} +{"index":{"_id":"925"}} +{"account_number":925,"balance":18295,"firstname":"Rosario","lastname":"Jackson","age":24,"gender":"M","address":"178 Leonora Court","employer":"Progenex","email":"rosariojackson@progenex.com","city":"Rivereno","state":"DE"} +{"index":{"_id":"932"}} +{"account_number":932,"balance":3111,"firstname":"Summer","lastname":"Porter","age":33,"gender":"F","address":"949 Grand Avenue","employer":"Multiflex","email":"summerporter@multiflex.com","city":"Spokane","state":"OK"} +{"index":{"_id":"937"}} +{"account_number":937,"balance":43491,"firstname":"Selma","lastname":"Anderson","age":24,"gender":"M","address":"205 Reed Street","employer":"Dadabase","email":"selmaanderson@dadabase.com","city":"Malo","state":"AL"} +{"index":{"_id":"944"}} +{"account_number":944,"balance":46478,"firstname":"Donaldson","lastname":"Woodard","age":38,"gender":"F","address":"498 Laurel Avenue","employer":"Zogak","email":"donaldsonwoodard@zogak.com","city":"Hasty","state":"ID"} +{"index":{"_id":"949"}} +{"account_number":949,"balance":48703,"firstname":"Latasha","lastname":"Mullins","age":29,"gender":"F","address":"272 Lefferts Place","employer":"Zenolux","email":"latashamullins@zenolux.com","city":"Kieler","state":"MN"} +{"index":{"_id":"951"}} +{"account_number":951,"balance":36337,"firstname":"Tran","lastname":"Burris","age":25,"gender":"F","address":"561 Rutland Road","employer":"Geoform","email":"tranburris@geoform.com","city":"Longbranch","state":"IL"} +{"index":{"_id":"956"}} +{"account_number":956,"balance":19477,"firstname":"Randall","lastname":"Lynch","age":22,"gender":"F","address":"490 Madison Place","employer":"Cosmetex","email":"randalllynch@cosmetex.com","city":"Wells","state":"SD"} +{"index":{"_id":"963"}} +{"account_number":963,"balance":30461,"firstname":"Griffin","lastname":"Sheppard","age":20,"gender":"M","address":"682 Linden Street","employer":"Zanymax","email":"griffinsheppard@zanymax.com","city":"Fannett","state":"NM"} +{"index":{"_id":"968"}} +{"account_number":968,"balance":32371,"firstname":"Luella","lastname":"Burch","age":39,"gender":"M","address":"684 Arkansas Drive","employer":"Krag","email":"luellaburch@krag.com","city":"Brambleton","state":"SD"} +{"index":{"_id":"970"}} 
+{"account_number":970,"balance":19648,"firstname":"Forbes","lastname":"Wallace","age":28,"gender":"M","address":"990 Mill Road","employer":"Pheast","email":"forbeswallace@pheast.com","city":"Lopezo","state":"AK"} +{"index":{"_id":"975"}} +{"account_number":975,"balance":5239,"firstname":"Delores","lastname":"Booker","age":27,"gender":"F","address":"328 Conselyea Street","employer":"Centice","email":"deloresbooker@centice.com","city":"Williams","state":"HI"} +{"index":{"_id":"982"}} +{"account_number":982,"balance":16511,"firstname":"Buck","lastname":"Robinson","age":24,"gender":"M","address":"301 Melrose Street","employer":"Calcu","email":"buckrobinson@calcu.com","city":"Welch","state":"PA"} +{"index":{"_id":"987"}} +{"account_number":987,"balance":4072,"firstname":"Brock","lastname":"Sandoval","age":20,"gender":"F","address":"977 Gem Street","employer":"Fiberox","email":"brocksandoval@fiberox.com","city":"Celeryville","state":"NY"} +{"index":{"_id":"994"}} +{"account_number":994,"balance":33298,"firstname":"Madge","lastname":"Holcomb","age":31,"gender":"M","address":"612 Hawthorne Street","employer":"Escenta","email":"madgeholcomb@escenta.com","city":"Alafaya","state":"OR"} +{"index":{"_id":"999"}} +{"account_number":999,"balance":6087,"firstname":"Dorothy","lastname":"Barron","age":22,"gender":"F","address":"499 Laurel Avenue","employer":"Xurban","email":"dorothybarron@xurban.com","city":"Belvoir","state":"CA"} +{"index":{"_id":"4"}} +{"account_number":4,"balance":27658,"firstname":"Rodriquez","lastname":"Flores","age":31,"gender":"F","address":"986 Wyckoff Avenue","employer":"Tourmania","email":"rodriquezflores@tourmania.com","city":"Eastvale","state":"HI"} +{"index":{"_id":"9"}} +{"account_number":9,"balance":24776,"firstname":"Opal","lastname":"Meadows","age":39,"gender":"M","address":"963 Neptune Avenue","employer":"Cedward","email":"opalmeadows@cedward.com","city":"Olney","state":"OH"} +{"index":{"_id":"11"}} +{"account_number":11,"balance":20203,"firstname":"Jenkins","lastname":"Haney","age":20,"gender":"M","address":"740 Ferry Place","employer":"Qimonk","email":"jenkinshaney@qimonk.com","city":"Steinhatchee","state":"GA"} +{"index":{"_id":"16"}} +{"account_number":16,"balance":35883,"firstname":"Adrian","lastname":"Pitts","age":34,"gender":"F","address":"963 Fay Court","employer":"Combogene","email":"adrianpitts@combogene.com","city":"Remington","state":"SD"} +{"index":{"_id":"23"}} +{"account_number":23,"balance":42374,"firstname":"Kirsten","lastname":"Fox","age":20,"gender":"M","address":"330 Dumont Avenue","employer":"Codax","email":"kirstenfox@codax.com","city":"Walton","state":"AK"} +{"index":{"_id":"28"}} +{"account_number":28,"balance":42112,"firstname":"Vega","lastname":"Flynn","age":20,"gender":"M","address":"647 Hyman Court","employer":"Accupharm","email":"vegaflynn@accupharm.com","city":"Masthope","state":"OH"} +{"index":{"_id":"30"}} +{"account_number":30,"balance":19087,"firstname":"Lamb","lastname":"Townsend","age":26,"gender":"M","address":"169 Lyme Avenue","employer":"Geeknet","email":"lambtownsend@geeknet.com","city":"Epworth","state":"AL"} +{"index":{"_id":"35"}} +{"account_number":35,"balance":42039,"firstname":"Darla","lastname":"Bridges","age":27,"gender":"F","address":"315 Central Avenue","employer":"Xeronk","email":"darlabridges@xeronk.com","city":"Woodlake","state":"RI"} +{"index":{"_id":"42"}} +{"account_number":42,"balance":21137,"firstname":"Harding","lastname":"Hobbs","age":26,"gender":"F","address":"474 Ridgewood 
Place","employer":"Xth","email":"hardinghobbs@xth.com","city":"Heil","state":"ND"} +{"index":{"_id":"47"}} +{"account_number":47,"balance":33044,"firstname":"Georgia","lastname":"Wilkerson","age":23,"gender":"M","address":"369 Herbert Street","employer":"Endipin","email":"georgiawilkerson@endipin.com","city":"Dellview","state":"WI"} +{"index":{"_id":"54"}} +{"account_number":54,"balance":23406,"firstname":"Angel","lastname":"Mann","age":22,"gender":"F","address":"229 Ferris Street","employer":"Amtas","email":"angelmann@amtas.com","city":"Calverton","state":"WA"} +{"index":{"_id":"59"}} +{"account_number":59,"balance":37728,"firstname":"Malone","lastname":"Justice","age":37,"gender":"F","address":"721 Russell Street","employer":"Emoltra","email":"malonejustice@emoltra.com","city":"Trucksville","state":"HI"} +{"index":{"_id":"61"}} +{"account_number":61,"balance":6856,"firstname":"Shawn","lastname":"Baird","age":20,"gender":"M","address":"605 Monument Walk","employer":"Moltonic","email":"shawnbaird@moltonic.com","city":"Darlington","state":"MN"} +{"index":{"_id":"66"}} +{"account_number":66,"balance":25939,"firstname":"Franks","lastname":"Salinas","age":28,"gender":"M","address":"437 Hamilton Walk","employer":"Cowtown","email":"frankssalinas@cowtown.com","city":"Chase","state":"VT"} +{"index":{"_id":"73"}} +{"account_number":73,"balance":33457,"firstname":"Irene","lastname":"Stephenson","age":32,"gender":"M","address":"684 Miller Avenue","employer":"Hawkster","email":"irenestephenson@hawkster.com","city":"Levant","state":"AR"} +{"index":{"_id":"78"}} +{"account_number":78,"balance":48656,"firstname":"Elvira","lastname":"Patterson","age":23,"gender":"F","address":"834 Amber Street","employer":"Assistix","email":"elvirapatterson@assistix.com","city":"Dunbar","state":"TN"} +{"index":{"_id":"80"}} +{"account_number":80,"balance":13445,"firstname":"Lacey","lastname":"Blanchard","age":30,"gender":"F","address":"823 Himrod Street","employer":"Comdom","email":"laceyblanchard@comdom.com","city":"Matthews","state":"MO"} +{"index":{"_id":"85"}} +{"account_number":85,"balance":48735,"firstname":"Wilcox","lastname":"Sellers","age":20,"gender":"M","address":"212 Irving Avenue","employer":"Confrenzy","email":"wilcoxsellers@confrenzy.com","city":"Kipp","state":"MT"} +{"index":{"_id":"92"}} +{"account_number":92,"balance":26753,"firstname":"Gay","lastname":"Brewer","age":34,"gender":"M","address":"369 Ditmars Street","employer":"Savvy","email":"gaybrewer@savvy.com","city":"Moquino","state":"HI"} +{"index":{"_id":"97"}} +{"account_number":97,"balance":49671,"firstname":"Karen","lastname":"Trujillo","age":40,"gender":"F","address":"512 Cumberland Walk","employer":"Tsunamia","email":"karentrujillo@tsunamia.com","city":"Fredericktown","state":"MO"} +{"index":{"_id":"100"}} +{"account_number":100,"balance":29869,"firstname":"Madden","lastname":"Woods","age":32,"gender":"F","address":"696 Ryder Avenue","employer":"Slumberia","email":"maddenwoods@slumberia.com","city":"Deercroft","state":"ME"} +{"index":{"_id":"105"}} +{"account_number":105,"balance":29654,"firstname":"Castillo","lastname":"Dickerson","age":33,"gender":"F","address":"673 Oxford Street","employer":"Tellifly","email":"castillodickerson@tellifly.com","city":"Succasunna","state":"NY"} +{"index":{"_id":"112"}} +{"account_number":112,"balance":38395,"firstname":"Frederick","lastname":"Case","age":30,"gender":"F","address":"580 Lexington Avenue","employer":"Talkalot","email":"frederickcase@talkalot.com","city":"Orovada","state":"MA"} 
+{"index":{"_id":"117"}} +{"account_number":117,"balance":48831,"firstname":"Robin","lastname":"Hays","age":38,"gender":"F","address":"347 Hornell Loop","employer":"Pasturia","email":"robinhays@pasturia.com","city":"Sims","state":"WY"} +{"index":{"_id":"124"}} +{"account_number":124,"balance":16425,"firstname":"Fern","lastname":"Lambert","age":20,"gender":"M","address":"511 Jay Street","employer":"Furnitech","email":"fernlambert@furnitech.com","city":"Cloverdale","state":"FL"} +{"index":{"_id":"129"}} +{"account_number":129,"balance":42409,"firstname":"Alexandria","lastname":"Sanford","age":33,"gender":"F","address":"934 Ridgecrest Terrace","employer":"Kyagoro","email":"alexandriasanford@kyagoro.com","city":"Concho","state":"UT"} +{"index":{"_id":"131"}} +{"account_number":131,"balance":28030,"firstname":"Dollie","lastname":"Koch","age":22,"gender":"F","address":"287 Manhattan Avenue","employer":"Skinserve","email":"dolliekoch@skinserve.com","city":"Shasta","state":"PA"} +{"index":{"_id":"136"}} +{"account_number":136,"balance":45801,"firstname":"Winnie","lastname":"Holland","age":38,"gender":"M","address":"198 Mill Lane","employer":"Neteria","email":"winnieholland@neteria.com","city":"Urie","state":"IL"} +{"index":{"_id":"143"}} +{"account_number":143,"balance":43093,"firstname":"Cohen","lastname":"Noble","age":39,"gender":"M","address":"454 Nelson Street","employer":"Buzzworks","email":"cohennoble@buzzworks.com","city":"Norvelt","state":"CO"} +{"index":{"_id":"148"}} +{"account_number":148,"balance":3662,"firstname":"Annmarie","lastname":"Snider","age":34,"gender":"F","address":"857 Lafayette Walk","employer":"Edecine","email":"annmariesnider@edecine.com","city":"Hollins","state":"OH"} +{"index":{"_id":"150"}} +{"account_number":150,"balance":15306,"firstname":"Ortega","lastname":"Dalton","age":20,"gender":"M","address":"237 Mermaid Avenue","employer":"Rameon","email":"ortegadalton@rameon.com","city":"Maxville","state":"NH"} +{"index":{"_id":"155"}} +{"account_number":155,"balance":27878,"firstname":"Atkinson","lastname":"Hudson","age":39,"gender":"F","address":"434 Colin Place","employer":"Qualitern","email":"atkinsonhudson@qualitern.com","city":"Hoehne","state":"OH"} +{"index":{"_id":"162"}} +{"account_number":162,"balance":6302,"firstname":"Griffith","lastname":"Calderon","age":35,"gender":"M","address":"871 Vandervoort Place","employer":"Quotezart","email":"griffithcalderon@quotezart.com","city":"Barclay","state":"FL"} +{"index":{"_id":"167"}} +{"account_number":167,"balance":42051,"firstname":"Hampton","lastname":"Ryan","age":20,"gender":"M","address":"618 Fleet Place","employer":"Zipak","email":"hamptonryan@zipak.com","city":"Irwin","state":"KS"} +{"index":{"_id":"174"}} +{"account_number":174,"balance":1464,"firstname":"Gamble","lastname":"Pierce","age":23,"gender":"F","address":"650 Eagle Street","employer":"Matrixity","email":"gamblepierce@matrixity.com","city":"Abiquiu","state":"OR"} +{"index":{"_id":"179"}} +{"account_number":179,"balance":13265,"firstname":"Elise","lastname":"Drake","age":25,"gender":"M","address":"305 Christopher Avenue","employer":"Turnling","email":"elisedrake@turnling.com","city":"Loretto","state":"LA"} +{"index":{"_id":"181"}} +{"account_number":181,"balance":27983,"firstname":"Bennett","lastname":"Hampton","age":22,"gender":"F","address":"435 Billings Place","employer":"Voipa","email":"bennetthampton@voipa.com","city":"Rodman","state":"WY"} +{"index":{"_id":"186"}} 
+{"account_number":186,"balance":18373,"firstname":"Kline","lastname":"Joyce","age":32,"gender":"M","address":"285 Falmouth Street","employer":"Tetratrex","email":"klinejoyce@tetratrex.com","city":"Klondike","state":"SD"} +{"index":{"_id":"193"}} +{"account_number":193,"balance":13412,"firstname":"Patty","lastname":"Petty","age":34,"gender":"F","address":"251 Vermont Street","employer":"Kinetica","email":"pattypetty@kinetica.com","city":"Grantville","state":"MS"} +{"index":{"_id":"198"}} +{"account_number":198,"balance":19686,"firstname":"Rachael","lastname":"Sharp","age":38,"gender":"F","address":"443 Vernon Avenue","employer":"Powernet","email":"rachaelsharp@powernet.com","city":"Canoochee","state":"UT"} +{"index":{"_id":"201"}} +{"account_number":201,"balance":14586,"firstname":"Ronda","lastname":"Perry","age":25,"gender":"F","address":"856 Downing Street","employer":"Artiq","email":"rondaperry@artiq.com","city":"Colton","state":"WV"} +{"index":{"_id":"206"}} +{"account_number":206,"balance":47423,"firstname":"Kelli","lastname":"Francis","age":20,"gender":"M","address":"671 George Street","employer":"Exoswitch","email":"kellifrancis@exoswitch.com","city":"Babb","state":"NJ"} +{"index":{"_id":"213"}} +{"account_number":213,"balance":34172,"firstname":"Bauer","lastname":"Summers","age":27,"gender":"M","address":"257 Boynton Place","employer":"Voratak","email":"bauersummers@voratak.com","city":"Oceola","state":"NC"} +{"index":{"_id":"218"}} +{"account_number":218,"balance":26702,"firstname":"Garrison","lastname":"Bryan","age":24,"gender":"F","address":"478 Greenpoint Avenue","employer":"Uniworld","email":"garrisonbryan@uniworld.com","city":"Comptche","state":"WI"} +{"index":{"_id":"220"}} +{"account_number":220,"balance":3086,"firstname":"Tania","lastname":"Middleton","age":22,"gender":"F","address":"541 Gunther Place","employer":"Zerology","email":"taniamiddleton@zerology.com","city":"Linwood","state":"IN"} +{"index":{"_id":"225"}} +{"account_number":225,"balance":21949,"firstname":"Maryann","lastname":"Murphy","age":24,"gender":"F","address":"894 Bridgewater Street","employer":"Cinesanct","email":"maryannmurphy@cinesanct.com","city":"Cartwright","state":"RI"} +{"index":{"_id":"232"}} +{"account_number":232,"balance":11984,"firstname":"Carr","lastname":"Jensen","age":34,"gender":"F","address":"995 Micieli Place","employer":"Biohab","email":"carrjensen@biohab.com","city":"Waikele","state":"OH"} +{"index":{"_id":"237"}} +{"account_number":237,"balance":5603,"firstname":"Kirby","lastname":"Watkins","age":27,"gender":"F","address":"348 Blake Court","employer":"Sonique","email":"kirbywatkins@sonique.com","city":"Freelandville","state":"PA"} +{"index":{"_id":"244"}} +{"account_number":244,"balance":8048,"firstname":"Judith","lastname":"Riggs","age":27,"gender":"F","address":"590 Kosciusko Street","employer":"Arctiq","email":"judithriggs@arctiq.com","city":"Gorham","state":"DC"} +{"index":{"_id":"249"}} +{"account_number":249,"balance":16822,"firstname":"Mckinney","lastname":"Gallagher","age":38,"gender":"F","address":"939 Seigel Court","employer":"Premiant","email":"mckinneygallagher@premiant.com","city":"Catharine","state":"NH"} +{"index":{"_id":"251"}} +{"account_number":251,"balance":13475,"firstname":"Marks","lastname":"Graves","age":39,"gender":"F","address":"427 Lawn Court","employer":"Dentrex","email":"marksgraves@dentrex.com","city":"Waukeenah","state":"IL"} +{"index":{"_id":"256"}} 
+{"account_number":256,"balance":48318,"firstname":"Simon","lastname":"Hogan","age":31,"gender":"M","address":"789 Suydam Place","employer":"Dancerity","email":"simonhogan@dancerity.com","city":"Dargan","state":"GA"} +{"index":{"_id":"263"}} +{"account_number":263,"balance":12837,"firstname":"Thornton","lastname":"Meyer","age":29,"gender":"M","address":"575 Elliott Place","employer":"Peticular","email":"thorntonmeyer@peticular.com","city":"Dotsero","state":"NH"} +{"index":{"_id":"268"}} +{"account_number":268,"balance":20925,"firstname":"Avis","lastname":"Blackwell","age":36,"gender":"M","address":"569 Jerome Avenue","employer":"Magnina","email":"avisblackwell@magnina.com","city":"Bethany","state":"MD"} +{"index":{"_id":"270"}} +{"account_number":270,"balance":43951,"firstname":"Moody","lastname":"Harmon","age":39,"gender":"F","address":"233 Vanderbilt Street","employer":"Otherside","email":"moodyharmon@otherside.com","city":"Elwood","state":"MT"} +{"index":{"_id":"275"}} +{"account_number":275,"balance":2384,"firstname":"Reynolds","lastname":"Barnett","age":31,"gender":"M","address":"394 Stockton Street","employer":"Austex","email":"reynoldsbarnett@austex.com","city":"Grandview","state":"MS"} +{"index":{"_id":"282"}} +{"account_number":282,"balance":38540,"firstname":"Gay","lastname":"Schultz","age":25,"gender":"F","address":"805 Claver Place","employer":"Handshake","email":"gayschultz@handshake.com","city":"Tampico","state":"MA"} +{"index":{"_id":"287"}} +{"account_number":287,"balance":10845,"firstname":"Valerie","lastname":"Lang","age":35,"gender":"F","address":"423 Midwood Street","employer":"Quarx","email":"valerielang@quarx.com","city":"Cannondale","state":"VT"} +{"index":{"_id":"294"}} +{"account_number":294,"balance":29582,"firstname":"Pitts","lastname":"Haynes","age":26,"gender":"M","address":"901 Broome Street","employer":"Aquazure","email":"pittshaynes@aquazure.com","city":"Turah","state":"SD"} +{"index":{"_id":"299"}} +{"account_number":299,"balance":40825,"firstname":"Angela","lastname":"Talley","age":36,"gender":"F","address":"822 Bills Place","employer":"Remold","email":"angelatalley@remold.com","city":"Bethpage","state":"DC"} +{"index":{"_id":"302"}} +{"account_number":302,"balance":11298,"firstname":"Isabella","lastname":"Hewitt","age":40,"gender":"M","address":"455 Bedford Avenue","employer":"Cincyr","email":"isabellahewitt@cincyr.com","city":"Blanford","state":"IN"} +{"index":{"_id":"307"}} +{"account_number":307,"balance":43355,"firstname":"Enid","lastname":"Ashley","age":23,"gender":"M","address":"412 Emerson Place","employer":"Avenetro","email":"enidashley@avenetro.com","city":"Catherine","state":"WI"} +{"index":{"_id":"314"}} +{"account_number":314,"balance":5848,"firstname":"Norton","lastname":"Norton","age":35,"gender":"M","address":"252 Ditmas Avenue","employer":"Talkola","email":"nortonnorton@talkola.com","city":"Veyo","state":"SC"} +{"index":{"_id":"319"}} +{"account_number":319,"balance":15430,"firstname":"Ferrell","lastname":"Mckinney","age":36,"gender":"M","address":"874 Cranberry Street","employer":"Portaline","email":"ferrellmckinney@portaline.com","city":"Rose","state":"WV"} +{"index":{"_id":"321"}} +{"account_number":321,"balance":43370,"firstname":"Marta","lastname":"Larsen","age":35,"gender":"M","address":"617 Williams Court","employer":"Manufact","email":"martalarsen@manufact.com","city":"Sisquoc","state":"MA"} +{"index":{"_id":"326"}} +{"account_number":326,"balance":9692,"firstname":"Pearl","lastname":"Reese","age":30,"gender":"F","address":"451 
Colonial Court","employer":"Accruex","email":"pearlreese@accruex.com","city":"Westmoreland","state":"MD"} +{"index":{"_id":"333"}} +{"account_number":333,"balance":22778,"firstname":"Trudy","lastname":"Sweet","age":27,"gender":"F","address":"881 Kiely Place","employer":"Acumentor","email":"trudysweet@acumentor.com","city":"Kent","state":"IA"} +{"index":{"_id":"338"}} +{"account_number":338,"balance":6969,"firstname":"Pierce","lastname":"Lawrence","age":35,"gender":"M","address":"318 Gallatin Place","employer":"Lunchpad","email":"piercelawrence@lunchpad.com","city":"Iola","state":"MD"} +{"index":{"_id":"340"}} +{"account_number":340,"balance":42072,"firstname":"Juarez","lastname":"Gutierrez","age":40,"gender":"F","address":"802 Seba Avenue","employer":"Billmed","email":"juarezgutierrez@billmed.com","city":"Malott","state":"OH"} +{"index":{"_id":"345"}} +{"account_number":345,"balance":9812,"firstname":"Parker","lastname":"Hines","age":38,"gender":"M","address":"715 Mill Avenue","employer":"Baluba","email":"parkerhines@baluba.com","city":"Blackgum","state":"KY"} +{"index":{"_id":"352"}} +{"account_number":352,"balance":20290,"firstname":"Kendra","lastname":"Mcintosh","age":31,"gender":"F","address":"963 Wolf Place","employer":"Orboid","email":"kendramcintosh@orboid.com","city":"Bladensburg","state":"AK"} +{"index":{"_id":"357"}} +{"account_number":357,"balance":15102,"firstname":"Adele","lastname":"Carroll","age":39,"gender":"F","address":"381 Arion Place","employer":"Aquafire","email":"adelecarroll@aquafire.com","city":"Springville","state":"RI"} +{"index":{"_id":"364"}} +{"account_number":364,"balance":35247,"firstname":"Felicia","lastname":"Merrill","age":40,"gender":"F","address":"229 Branton Street","employer":"Prosely","email":"feliciamerrill@prosely.com","city":"Dola","state":"MA"} +{"index":{"_id":"369"}} +{"account_number":369,"balance":17047,"firstname":"Mcfadden","lastname":"Guy","age":28,"gender":"F","address":"445 Lott Avenue","employer":"Kangle","email":"mcfaddenguy@kangle.com","city":"Greenbackville","state":"DE"} +{"index":{"_id":"371"}} +{"account_number":371,"balance":19751,"firstname":"Barker","lastname":"Allen","age":32,"gender":"F","address":"295 Wallabout Street","employer":"Nexgene","email":"barkerallen@nexgene.com","city":"Nanafalia","state":"NE"} +{"index":{"_id":"376"}} +{"account_number":376,"balance":44407,"firstname":"Mcmillan","lastname":"Dunn","age":21,"gender":"F","address":"771 Dorchester Road","employer":"Eargo","email":"mcmillandunn@eargo.com","city":"Yogaville","state":"RI"} +{"index":{"_id":"383"}} +{"account_number":383,"balance":48889,"firstname":"Knox","lastname":"Larson","age":28,"gender":"F","address":"962 Bartlett Place","employer":"Bostonic","email":"knoxlarson@bostonic.com","city":"Smeltertown","state":"TX"} +{"index":{"_id":"388"}} +{"account_number":388,"balance":9606,"firstname":"Julianne","lastname":"Nicholson","age":26,"gender":"F","address":"338 Crescent Street","employer":"Viasia","email":"juliannenicholson@viasia.com","city":"Alleghenyville","state":"MO"} +{"index":{"_id":"390"}} +{"account_number":390,"balance":7464,"firstname":"Ramona","lastname":"Roy","age":32,"gender":"M","address":"135 Banner Avenue","employer":"Deminimum","email":"ramonaroy@deminimum.com","city":"Dodge","state":"ID"} +{"index":{"_id":"395"}} +{"account_number":395,"balance":18679,"firstname":"Juliet","lastname":"Whitaker","age":31,"gender":"M","address":"128 Remsen Avenue","employer":"Toyletry","email":"julietwhitaker@toyletry.com","city":"Yonah","state":"LA"} 
+{"index":{"_id":"403"}} +{"account_number":403,"balance":18833,"firstname":"Williamson","lastname":"Horn","age":32,"gender":"M","address":"223 Strickland Avenue","employer":"Nimon","email":"williamsonhorn@nimon.com","city":"Bawcomville","state":"NJ"} +{"index":{"_id":"408"}} +{"account_number":408,"balance":34666,"firstname":"Lidia","lastname":"Guerrero","age":30,"gender":"M","address":"254 Stratford Road","employer":"Snowpoke","email":"lidiaguerrero@snowpoke.com","city":"Fairlee","state":"LA"} +{"index":{"_id":"410"}} +{"account_number":410,"balance":31200,"firstname":"Fox","lastname":"Cardenas","age":39,"gender":"M","address":"987 Monitor Street","employer":"Corpulse","email":"foxcardenas@corpulse.com","city":"Southview","state":"NE"} +{"index":{"_id":"415"}} +{"account_number":415,"balance":19449,"firstname":"Martinez","lastname":"Benson","age":36,"gender":"M","address":"172 Berkeley Place","employer":"Enersol","email":"martinezbenson@enersol.com","city":"Chumuckla","state":"AL"} +{"index":{"_id":"422"}} +{"account_number":422,"balance":40162,"firstname":"Brigitte","lastname":"Scott","age":26,"gender":"M","address":"662 Vermont Court","employer":"Waretel","email":"brigittescott@waretel.com","city":"Elrama","state":"VA"} +{"index":{"_id":"427"}} +{"account_number":427,"balance":1463,"firstname":"Rebekah","lastname":"Garrison","age":36,"gender":"F","address":"837 Hampton Avenue","employer":"Niquent","email":"rebekahgarrison@niquent.com","city":"Zarephath","state":"NY"} +{"index":{"_id":"434"}} +{"account_number":434,"balance":11329,"firstname":"Christa","lastname":"Huff","age":25,"gender":"M","address":"454 Oriental Boulevard","employer":"Earthpure","email":"christahuff@earthpure.com","city":"Stevens","state":"DC"} +{"index":{"_id":"439"}} +{"account_number":439,"balance":22752,"firstname":"Lula","lastname":"Williams","age":35,"gender":"M","address":"630 Furman Avenue","employer":"Vinch","email":"lulawilliams@vinch.com","city":"Newcastle","state":"ME"} +{"index":{"_id":"441"}} +{"account_number":441,"balance":47947,"firstname":"Dickson","lastname":"Mcgee","age":29,"gender":"M","address":"478 Knight Court","employer":"Gogol","email":"dicksonmcgee@gogol.com","city":"Laurelton","state":"AR"} +{"index":{"_id":"446"}} +{"account_number":446,"balance":23071,"firstname":"Lolita","lastname":"Fleming","age":32,"gender":"F","address":"918 Bridge Street","employer":"Vidto","email":"lolitafleming@vidto.com","city":"Brownlee","state":"HI"} +{"index":{"_id":"453"}} +{"account_number":453,"balance":21520,"firstname":"Hood","lastname":"Powell","age":24,"gender":"F","address":"479 Brevoort Place","employer":"Vortexaco","email":"hoodpowell@vortexaco.com","city":"Alderpoint","state":"CT"} +{"index":{"_id":"458"}} +{"account_number":458,"balance":8865,"firstname":"Aida","lastname":"Wolf","age":21,"gender":"F","address":"403 Thames Street","employer":"Isis","email":"aidawolf@isis.com","city":"Bordelonville","state":"ME"} +{"index":{"_id":"460"}} +{"account_number":460,"balance":37734,"firstname":"Aguirre","lastname":"White","age":21,"gender":"F","address":"190 Crooke Avenue","employer":"Unq","email":"aguirrewhite@unq.com","city":"Albany","state":"NJ"} +{"index":{"_id":"465"}} +{"account_number":465,"balance":10681,"firstname":"Pearlie","lastname":"Holman","age":29,"gender":"M","address":"916 Evergreen Avenue","employer":"Hometown","email":"pearlieholman@hometown.com","city":"Needmore","state":"UT"} +{"index":{"_id":"472"}} 
+{"account_number":472,"balance":25571,"firstname":"Lee","lastname":"Long","age":32,"gender":"F","address":"288 Mill Street","employer":"Comverges","email":"leelong@comverges.com","city":"Movico","state":"MT"} +{"index":{"_id":"477"}} +{"account_number":477,"balance":25892,"firstname":"Holcomb","lastname":"Cobb","age":40,"gender":"M","address":"369 Marconi Place","employer":"Steeltab","email":"holcombcobb@steeltab.com","city":"Byrnedale","state":"CA"} +{"index":{"_id":"484"}} +{"account_number":484,"balance":3274,"firstname":"Staci","lastname":"Melendez","age":35,"gender":"F","address":"751 Otsego Street","employer":"Namebox","email":"stacimelendez@namebox.com","city":"Harborton","state":"NV"} +{"index":{"_id":"489"}} +{"account_number":489,"balance":7879,"firstname":"Garrett","lastname":"Langley","age":36,"gender":"M","address":"331 Bowne Street","employer":"Zillidium","email":"garrettlangley@zillidium.com","city":"Riviera","state":"LA"} +{"index":{"_id":"491"}} +{"account_number":491,"balance":42942,"firstname":"Teresa","lastname":"Owen","age":24,"gender":"F","address":"713 Canton Court","employer":"Plasmos","email":"teresaowen@plasmos.com","city":"Bartonsville","state":"NH"} +{"index":{"_id":"496"}} +{"account_number":496,"balance":14869,"firstname":"Alison","lastname":"Conrad","age":35,"gender":"F","address":"347 Varet Street","employer":"Perkle","email":"alisonconrad@perkle.com","city":"Cliffside","state":"OH"} +{"index":{"_id":"504"}} +{"account_number":504,"balance":49205,"firstname":"Shanna","lastname":"Chambers","age":23,"gender":"M","address":"220 Beard Street","employer":"Corporana","email":"shannachambers@corporana.com","city":"Cashtown","state":"AZ"} +{"index":{"_id":"509"}} +{"account_number":509,"balance":34754,"firstname":"Durham","lastname":"Pacheco","age":40,"gender":"M","address":"129 Plymouth Street","employer":"Datacator","email":"durhampacheco@datacator.com","city":"Loveland","state":"NC"} +{"index":{"_id":"511"}} +{"account_number":511,"balance":40908,"firstname":"Elba","lastname":"Grant","age":24,"gender":"F","address":"157 Bijou Avenue","employer":"Dognost","email":"elbagrant@dognost.com","city":"Coyote","state":"MT"} +{"index":{"_id":"516"}} +{"account_number":516,"balance":44940,"firstname":"Roy","lastname":"Smith","age":37,"gender":"M","address":"770 Cherry Street","employer":"Parleynet","email":"roysmith@parleynet.com","city":"Carrsville","state":"RI"} +{"index":{"_id":"523"}} +{"account_number":523,"balance":28729,"firstname":"Amalia","lastname":"Benjamin","age":40,"gender":"F","address":"173 Bushwick Place","employer":"Sentia","email":"amaliabenjamin@sentia.com","city":"Jacumba","state":"OK"} +{"index":{"_id":"528"}} +{"account_number":528,"balance":4071,"firstname":"Thompson","lastname":"Hoover","age":27,"gender":"F","address":"580 Garden Street","employer":"Portalis","email":"thompsonhoover@portalis.com","city":"Knowlton","state":"AL"} +{"index":{"_id":"530"}} +{"account_number":530,"balance":8840,"firstname":"Kathrine","lastname":"Evans","age":37,"gender":"M","address":"422 Division Place","employer":"Spherix","email":"kathrineevans@spherix.com","city":"Biddle","state":"CO"} +{"index":{"_id":"535"}} +{"account_number":535,"balance":8715,"firstname":"Fry","lastname":"George","age":34,"gender":"M","address":"722 Green Street","employer":"Ewaves","email":"frygeorge@ewaves.com","city":"Kenmar","state":"DE"} +{"index":{"_id":"542"}} +{"account_number":542,"balance":23285,"firstname":"Michelle","lastname":"Mayo","age":35,"gender":"M","address":"657 Caton 
Place","employer":"Biflex","email":"michellemayo@biflex.com","city":"Beaverdale","state":"WY"} +{"index":{"_id":"547"}} +{"account_number":547,"balance":12870,"firstname":"Eaton","lastname":"Rios","age":32,"gender":"M","address":"744 Withers Street","employer":"Podunk","email":"eatonrios@podunk.com","city":"Chelsea","state":"IA"} +{"index":{"_id":"554"}} +{"account_number":554,"balance":33163,"firstname":"Townsend","lastname":"Atkins","age":39,"gender":"M","address":"566 Ira Court","employer":"Acruex","email":"townsendatkins@acruex.com","city":"Valle","state":"IA"} +{"index":{"_id":"559"}} +{"account_number":559,"balance":11450,"firstname":"Tonia","lastname":"Schmidt","age":38,"gender":"F","address":"508 Sheffield Avenue","employer":"Extro","email":"toniaschmidt@extro.com","city":"Newry","state":"CT"} +{"index":{"_id":"561"}} +{"account_number":561,"balance":12370,"firstname":"Sellers","lastname":"Davis","age":30,"gender":"M","address":"860 Madoc Avenue","employer":"Isodrive","email":"sellersdavis@isodrive.com","city":"Trail","state":"KS"} +{"index":{"_id":"566"}} +{"account_number":566,"balance":6183,"firstname":"Cox","lastname":"Roman","age":37,"gender":"M","address":"349 Winthrop Street","employer":"Medcom","email":"coxroman@medcom.com","city":"Rosewood","state":"WY"} +{"index":{"_id":"573"}} +{"account_number":573,"balance":32171,"firstname":"Callie","lastname":"Castaneda","age":36,"gender":"M","address":"799 Scott Avenue","employer":"Earthwax","email":"calliecastaneda@earthwax.com","city":"Marshall","state":"NH"} +{"index":{"_id":"578"}} +{"account_number":578,"balance":34259,"firstname":"Holmes","lastname":"Mcknight","age":37,"gender":"M","address":"969 Metropolitan Avenue","employer":"Cubicide","email":"holmesmcknight@cubicide.com","city":"Aguila","state":"PA"} +{"index":{"_id":"580"}} +{"account_number":580,"balance":13716,"firstname":"Mcmahon","lastname":"York","age":34,"gender":"M","address":"475 Beacon Court","employer":"Zillar","email":"mcmahonyork@zillar.com","city":"Farmington","state":"MO"} +{"index":{"_id":"585"}} +{"account_number":585,"balance":26745,"firstname":"Nieves","lastname":"Nolan","age":32,"gender":"M","address":"115 Seagate Terrace","employer":"Jumpstack","email":"nievesnolan@jumpstack.com","city":"Eastmont","state":"UT"} +{"index":{"_id":"592"}} +{"account_number":592,"balance":32968,"firstname":"Head","lastname":"Webster","age":36,"gender":"F","address":"987 Lefferts Avenue","employer":"Empirica","email":"headwebster@empirica.com","city":"Rockingham","state":"TN"} +{"index":{"_id":"597"}} +{"account_number":597,"balance":11246,"firstname":"Penny","lastname":"Knowles","age":33,"gender":"M","address":"139 Forbell Street","employer":"Ersum","email":"pennyknowles@ersum.com","city":"Vallonia","state":"IA"} +{"index":{"_id":"600"}} +{"account_number":600,"balance":10336,"firstname":"Simmons","lastname":"Byers","age":37,"gender":"M","address":"250 Dictum Court","employer":"Qualitex","email":"simmonsbyers@qualitex.com","city":"Wanship","state":"OH"} +{"index":{"_id":"605"}} +{"account_number":605,"balance":38427,"firstname":"Mcclain","lastname":"Manning","age":24,"gender":"M","address":"832 Leonard Street","employer":"Qiao","email":"mcclainmanning@qiao.com","city":"Calvary","state":"TX"} +{"index":{"_id":"612"}} +{"account_number":612,"balance":11868,"firstname":"Dunn","lastname":"Cameron","age":32,"gender":"F","address":"156 Lorimer Street","employer":"Isonus","email":"dunncameron@isonus.com","city":"Virgie","state":"ND"} +{"index":{"_id":"617"}} 
+{"account_number":617,"balance":35445,"firstname":"Kitty","lastname":"Cooley","age":22,"gender":"M","address":"788 Seagate Avenue","employer":"Ultrimax","email":"kittycooley@ultrimax.com","city":"Clarktown","state":"MD"} +{"index":{"_id":"624"}} +{"account_number":624,"balance":27538,"firstname":"Roxanne","lastname":"Franklin","age":39,"gender":"F","address":"299 Woodrow Court","employer":"Silodyne","email":"roxannefranklin@silodyne.com","city":"Roulette","state":"VA"} +{"index":{"_id":"629"}} +{"account_number":629,"balance":32987,"firstname":"Mcclure","lastname":"Rodgers","age":26,"gender":"M","address":"806 Pierrepont Place","employer":"Elita","email":"mcclurerodgers@elita.com","city":"Brownsville","state":"MI"} +{"index":{"_id":"631"}} +{"account_number":631,"balance":21657,"firstname":"Corrine","lastname":"Barber","age":32,"gender":"F","address":"447 Hunts Lane","employer":"Quarmony","email":"corrinebarber@quarmony.com","city":"Wyano","state":"IL"} +{"index":{"_id":"636"}} +{"account_number":636,"balance":8036,"firstname":"Agnes","lastname":"Hooper","age":25,"gender":"M","address":"865 Hanson Place","employer":"Digial","email":"agneshooper@digial.com","city":"Sperryville","state":"OK"} +{"index":{"_id":"643"}} +{"account_number":643,"balance":8057,"firstname":"Hendricks","lastname":"Stokes","age":23,"gender":"F","address":"142 Barbey Street","employer":"Remotion","email":"hendricksstokes@remotion.com","city":"Lewis","state":"MA"} +{"index":{"_id":"648"}} +{"account_number":648,"balance":11506,"firstname":"Terry","lastname":"Montgomery","age":21,"gender":"F","address":"115 Franklin Avenue","employer":"Enervate","email":"terrymontgomery@enervate.com","city":"Bascom","state":"MA"} +{"index":{"_id":"650"}} +{"account_number":650,"balance":18091,"firstname":"Benton","lastname":"Knight","age":28,"gender":"F","address":"850 Aitken Place","employer":"Pholio","email":"bentonknight@pholio.com","city":"Cobbtown","state":"AL"} +{"index":{"_id":"655"}} +{"account_number":655,"balance":22912,"firstname":"Eula","lastname":"Taylor","age":30,"gender":"M","address":"520 Orient Avenue","employer":"Miracula","email":"eulataylor@miracula.com","city":"Wacissa","state":"IN"} +{"index":{"_id":"662"}} +{"account_number":662,"balance":10138,"firstname":"Daisy","lastname":"Burnett","age":33,"gender":"M","address":"114 Norman Avenue","employer":"Liquicom","email":"daisyburnett@liquicom.com","city":"Grahamtown","state":"MD"} +{"index":{"_id":"667"}} +{"account_number":667,"balance":22559,"firstname":"Juliana","lastname":"Chase","age":32,"gender":"M","address":"496 Coleridge Street","employer":"Comtract","email":"julianachase@comtract.com","city":"Wilsonia","state":"NJ"} +{"index":{"_id":"674"}} +{"account_number":674,"balance":36038,"firstname":"Watts","lastname":"Shannon","age":22,"gender":"F","address":"600 Story Street","employer":"Joviold","email":"wattsshannon@joviold.com","city":"Fairhaven","state":"ID"} +{"index":{"_id":"679"}} +{"account_number":679,"balance":20149,"firstname":"Henrietta","lastname":"Bonner","age":33,"gender":"M","address":"461 Bond Street","employer":"Geekol","email":"henriettabonner@geekol.com","city":"Richville","state":"WA"} +{"index":{"_id":"681"}} +{"account_number":681,"balance":34244,"firstname":"Velazquez","lastname":"Wolfe","age":33,"gender":"M","address":"773 Eckford Street","employer":"Zisis","email":"velazquezwolfe@zisis.com","city":"Smock","state":"ME"} +{"index":{"_id":"686"}} 
+{"account_number":686,"balance":10116,"firstname":"Decker","lastname":"Mcclure","age":30,"gender":"F","address":"236 Commerce Street","employer":"Everest","email":"deckermcclure@everest.com","city":"Gibbsville","state":"TN"} +{"index":{"_id":"693"}} +{"account_number":693,"balance":31233,"firstname":"Tabatha","lastname":"Zimmerman","age":30,"gender":"F","address":"284 Emmons Avenue","employer":"Pushcart","email":"tabathazimmerman@pushcart.com","city":"Esmont","state":"NC"} +{"index":{"_id":"698"}} +{"account_number":698,"balance":14965,"firstname":"Baker","lastname":"Armstrong","age":36,"gender":"F","address":"796 Tehama Street","employer":"Nurplex","email":"bakerarmstrong@nurplex.com","city":"Starks","state":"UT"} +{"index":{"_id":"701"}} +{"account_number":701,"balance":23772,"firstname":"Gardner","lastname":"Griffith","age":27,"gender":"M","address":"187 Moore Place","employer":"Vertide","email":"gardnergriffith@vertide.com","city":"Coventry","state":"NV"} +{"index":{"_id":"706"}} +{"account_number":706,"balance":5282,"firstname":"Eliza","lastname":"Potter","age":39,"gender":"M","address":"945 Dunham Place","employer":"Playce","email":"elizapotter@playce.com","city":"Woodruff","state":"AK"} +{"index":{"_id":"713"}} +{"account_number":713,"balance":20054,"firstname":"Iris","lastname":"Mcguire","age":21,"gender":"F","address":"508 Benson Avenue","employer":"Duflex","email":"irismcguire@duflex.com","city":"Hillsboro","state":"MO"} +{"index":{"_id":"718"}} +{"account_number":718,"balance":13876,"firstname":"Hickman","lastname":"Dillard","age":22,"gender":"F","address":"132 Etna Street","employer":"Genmy","email":"hickmandillard@genmy.com","city":"Curtice","state":"NV"} +{"index":{"_id":"720"}} +{"account_number":720,"balance":31356,"firstname":"Ruth","lastname":"Vance","age":32,"gender":"F","address":"229 Adams Street","employer":"Zilidium","email":"ruthvance@zilidium.com","city":"Allison","state":"IA"} +{"index":{"_id":"725"}} +{"account_number":725,"balance":14677,"firstname":"Reeves","lastname":"Tillman","age":26,"gender":"M","address":"674 Ivan Court","employer":"Cemention","email":"reevestillman@cemention.com","city":"Navarre","state":"MA"} +{"index":{"_id":"732"}} +{"account_number":732,"balance":38445,"firstname":"Delia","lastname":"Cruz","age":37,"gender":"F","address":"870 Cheever Place","employer":"Multron","email":"deliacruz@multron.com","city":"Cresaptown","state":"NH"} +{"index":{"_id":"737"}} +{"account_number":737,"balance":40431,"firstname":"Sampson","lastname":"Yates","age":23,"gender":"F","address":"214 Cox Place","employer":"Signidyne","email":"sampsonyates@signidyne.com","city":"Brazos","state":"GA"} +{"index":{"_id":"744"}} +{"account_number":744,"balance":8690,"firstname":"Bernard","lastname":"Martinez","age":21,"gender":"M","address":"148 Dunne Place","employer":"Dragbot","email":"bernardmartinez@dragbot.com","city":"Moraida","state":"MN"} +{"index":{"_id":"749"}} +{"account_number":749,"balance":1249,"firstname":"Rush","lastname":"Boyle","age":36,"gender":"M","address":"310 Argyle Road","employer":"Sportan","email":"rushboyle@sportan.com","city":"Brady","state":"WA"} +{"index":{"_id":"751"}} +{"account_number":751,"balance":49252,"firstname":"Patrick","lastname":"Osborne","age":23,"gender":"M","address":"915 Prospect Avenue","employer":"Gynko","email":"patrickosborne@gynko.com","city":"Takilma","state":"MO"} +{"index":{"_id":"756"}} +{"account_number":756,"balance":40006,"firstname":"Jasmine","lastname":"Howell","age":32,"gender":"M","address":"605 Elliott 
Walk","employer":"Ecratic","email":"jasminehowell@ecratic.com","city":"Harrodsburg","state":"OH"} +{"index":{"_id":"763"}} +{"account_number":763,"balance":12091,"firstname":"Liz","lastname":"Bentley","age":22,"gender":"F","address":"933 Debevoise Avenue","employer":"Nipaz","email":"lizbentley@nipaz.com","city":"Glenville","state":"NJ"} +{"index":{"_id":"768"}} +{"account_number":768,"balance":2213,"firstname":"Sondra","lastname":"Soto","age":21,"gender":"M","address":"625 Colonial Road","employer":"Navir","email":"sondrasoto@navir.com","city":"Benson","state":"VA"} +{"index":{"_id":"770"}} +{"account_number":770,"balance":39505,"firstname":"Joann","lastname":"Crane","age":26,"gender":"M","address":"798 Farragut Place","employer":"Lingoage","email":"joanncrane@lingoage.com","city":"Kirk","state":"MA"} +{"index":{"_id":"775"}} +{"account_number":775,"balance":27943,"firstname":"Wilson","lastname":"Merritt","age":33,"gender":"F","address":"288 Thornton Street","employer":"Geeky","email":"wilsonmerritt@geeky.com","city":"Holtville","state":"HI"} +{"index":{"_id":"782"}} +{"account_number":782,"balance":3960,"firstname":"Maldonado","lastname":"Craig","age":36,"gender":"F","address":"345 Myrtle Avenue","employer":"Zilencio","email":"maldonadocraig@zilencio.com","city":"Yukon","state":"ID"} +{"index":{"_id":"787"}} +{"account_number":787,"balance":11876,"firstname":"Harper","lastname":"Wynn","age":21,"gender":"F","address":"139 Oceanic Avenue","employer":"Interfind","email":"harperwynn@interfind.com","city":"Gerber","state":"ND"} +{"index":{"_id":"794"}} +{"account_number":794,"balance":16491,"firstname":"Walker","lastname":"Charles","age":32,"gender":"M","address":"215 Kenilworth Place","employer":"Orbin","email":"walkercharles@orbin.com","city":"Rivers","state":"WI"} +{"index":{"_id":"799"}} +{"account_number":799,"balance":2889,"firstname":"Myra","lastname":"Guerra","age":28,"gender":"F","address":"625 Dahlgreen Place","employer":"Digigene","email":"myraguerra@digigene.com","city":"Draper","state":"CA"} +{"index":{"_id":"802"}} +{"account_number":802,"balance":19630,"firstname":"Gracie","lastname":"Foreman","age":40,"gender":"F","address":"219 Kent Avenue","employer":"Supportal","email":"gracieforeman@supportal.com","city":"Westboro","state":"NH"} +{"index":{"_id":"807"}} +{"account_number":807,"balance":29206,"firstname":"Hatfield","lastname":"Lowe","age":23,"gender":"M","address":"499 Adler Place","employer":"Lovepad","email":"hatfieldlowe@lovepad.com","city":"Wiscon","state":"DC"} +{"index":{"_id":"814"}} +{"account_number":814,"balance":9838,"firstname":"Morse","lastname":"Mcbride","age":26,"gender":"F","address":"776 Calyer Street","employer":"Inear","email":"morsemcbride@inear.com","city":"Kingstowne","state":"ND"} +{"index":{"_id":"819"}} +{"account_number":819,"balance":3971,"firstname":"Karyn","lastname":"Medina","age":24,"gender":"F","address":"417 Utica Avenue","employer":"Qnekt","email":"karynmedina@qnekt.com","city":"Kerby","state":"WY"} +{"index":{"_id":"821"}} +{"account_number":821,"balance":33271,"firstname":"Trisha","lastname":"Blankenship","age":22,"gender":"M","address":"329 Jamaica Avenue","employer":"Chorizon","email":"trishablankenship@chorizon.com","city":"Sexton","state":"VT"} +{"index":{"_id":"826"}} +{"account_number":826,"balance":11548,"firstname":"Summers","lastname":"Vinson","age":22,"gender":"F","address":"742 Irwin Street","employer":"Globoil","email":"summersvinson@globoil.com","city":"Callaghan","state":"WY"} +{"index":{"_id":"833"}} 
+{"account_number":833,"balance":46154,"firstname":"Woodward","lastname":"Hood","age":22,"gender":"M","address":"398 Atkins Avenue","employer":"Zedalis","email":"woodwardhood@zedalis.com","city":"Stonybrook","state":"NE"} +{"index":{"_id":"838"}} +{"account_number":838,"balance":24629,"firstname":"Latonya","lastname":"Blake","age":37,"gender":"F","address":"531 Milton Street","employer":"Rugstars","email":"latonyablake@rugstars.com","city":"Tedrow","state":"WA"} +{"index":{"_id":"840"}} +{"account_number":840,"balance":39615,"firstname":"Boone","lastname":"Gomez","age":38,"gender":"M","address":"256 Hampton Place","employer":"Geekular","email":"boonegomez@geekular.com","city":"Westerville","state":"HI"} +{"index":{"_id":"845"}} +{"account_number":845,"balance":35422,"firstname":"Tracy","lastname":"Vaughn","age":39,"gender":"M","address":"645 Rockaway Parkway","employer":"Andryx","email":"tracyvaughn@andryx.com","city":"Wilmington","state":"ME"} +{"index":{"_id":"852"}} +{"account_number":852,"balance":6041,"firstname":"Allen","lastname":"Hammond","age":26,"gender":"M","address":"793 Essex Street","employer":"Tersanki","email":"allenhammond@tersanki.com","city":"Osmond","state":"NC"} +{"index":{"_id":"857"}} +{"account_number":857,"balance":39678,"firstname":"Alyce","lastname":"Douglas","age":23,"gender":"M","address":"326 Robert Street","employer":"Earbang","email":"alycedouglas@earbang.com","city":"Thornport","state":"GA"} +{"index":{"_id":"864"}} +{"account_number":864,"balance":21804,"firstname":"Duffy","lastname":"Anthony","age":23,"gender":"M","address":"582 Cooke Court","employer":"Schoolio","email":"duffyanthony@schoolio.com","city":"Brenton","state":"CO"} +{"index":{"_id":"869"}} +{"account_number":869,"balance":43544,"firstname":"Corinne","lastname":"Robbins","age":25,"gender":"F","address":"732 Quentin Road","employer":"Orbaxter","email":"corinnerobbins@orbaxter.com","city":"Roy","state":"TN"} +{"index":{"_id":"871"}} +{"account_number":871,"balance":35854,"firstname":"Norma","lastname":"Burt","age":32,"gender":"M","address":"934 Cyrus Avenue","employer":"Magnafone","email":"normaburt@magnafone.com","city":"Eden","state":"TN"} +{"index":{"_id":"876"}} +{"account_number":876,"balance":48568,"firstname":"Brady","lastname":"Glover","age":21,"gender":"F","address":"565 Oceanview Avenue","employer":"Comvex","email":"bradyglover@comvex.com","city":"Noblestown","state":"ID"} +{"index":{"_id":"883"}} +{"account_number":883,"balance":33679,"firstname":"Austin","lastname":"Jefferson","age":34,"gender":"M","address":"846 Lincoln Avenue","employer":"Polarax","email":"austinjefferson@polarax.com","city":"Savannah","state":"CT"} +{"index":{"_id":"888"}} +{"account_number":888,"balance":22277,"firstname":"Myrna","lastname":"Herman","age":39,"gender":"F","address":"649 Harwood Place","employer":"Enthaze","email":"myrnaherman@enthaze.com","city":"Idamay","state":"AR"} +{"index":{"_id":"890"}} +{"account_number":890,"balance":31198,"firstname":"Alvarado","lastname":"Pate","age":25,"gender":"M","address":"269 Ashland Place","employer":"Ovolo","email":"alvaradopate@ovolo.com","city":"Volta","state":"MI"} +{"index":{"_id":"895"}} +{"account_number":895,"balance":7327,"firstname":"Lara","lastname":"Mcdaniel","age":36,"gender":"M","address":"854 Willow Place","employer":"Acusage","email":"laramcdaniel@acusage.com","city":"Imperial","state":"NC"} +{"index":{"_id":"903"}} +{"account_number":903,"balance":10238,"firstname":"Wade","lastname":"Page","age":35,"gender":"F","address":"685 Waldorf 
Court","employer":"Eplosion","email":"wadepage@eplosion.com","city":"Welda","state":"AL"} +{"index":{"_id":"908"}} +{"account_number":908,"balance":45975,"firstname":"Mosley","lastname":"Holloway","age":31,"gender":"M","address":"929 Eldert Lane","employer":"Anivet","email":"mosleyholloway@anivet.com","city":"Biehle","state":"MS"} +{"index":{"_id":"910"}} +{"account_number":910,"balance":36831,"firstname":"Esmeralda","lastname":"James","age":23,"gender":"F","address":"535 High Street","employer":"Terrasys","email":"esmeraldajames@terrasys.com","city":"Dubois","state":"IN"} +{"index":{"_id":"915"}} +{"account_number":915,"balance":19816,"firstname":"Farrell","lastname":"French","age":35,"gender":"F","address":"126 McKibbin Street","employer":"Techmania","email":"farrellfrench@techmania.com","city":"Wescosville","state":"AL"} +{"index":{"_id":"922"}} +{"account_number":922,"balance":39347,"firstname":"Irwin","lastname":"Pugh","age":32,"gender":"M","address":"463 Shale Street","employer":"Idego","email":"irwinpugh@idego.com","city":"Ivanhoe","state":"ID"} +{"index":{"_id":"927"}} +{"account_number":927,"balance":19976,"firstname":"Jeanette","lastname":"Acevedo","age":26,"gender":"M","address":"694 Polhemus Place","employer":"Halap","email":"jeanetteacevedo@halap.com","city":"Harrison","state":"MO"} +{"index":{"_id":"934"}} +{"account_number":934,"balance":43987,"firstname":"Freida","lastname":"Daniels","age":34,"gender":"M","address":"448 Cove Lane","employer":"Vurbo","email":"freidadaniels@vurbo.com","city":"Snelling","state":"NJ"} +{"index":{"_id":"939"}} +{"account_number":939,"balance":31228,"firstname":"Hodges","lastname":"Massey","age":37,"gender":"F","address":"431 Dahl Court","employer":"Kegular","email":"hodgesmassey@kegular.com","city":"Katonah","state":"MD"} +{"index":{"_id":"941"}} +{"account_number":941,"balance":38796,"firstname":"Kim","lastname":"Moss","age":28,"gender":"F","address":"105 Onderdonk Avenue","employer":"Digirang","email":"kimmoss@digirang.com","city":"Centerville","state":"TX"} +{"index":{"_id":"946"}} +{"account_number":946,"balance":42794,"firstname":"Ina","lastname":"Obrien","age":36,"gender":"M","address":"339 Rewe Street","employer":"Eclipsent","email":"inaobrien@eclipsent.com","city":"Soham","state":"RI"} +{"index":{"_id":"953"}} +{"account_number":953,"balance":1110,"firstname":"Baxter","lastname":"Black","age":27,"gender":"M","address":"720 Stillwell Avenue","employer":"Uplinx","email":"baxterblack@uplinx.com","city":"Drummond","state":"MN"} +{"index":{"_id":"958"}} +{"account_number":958,"balance":32849,"firstname":"Brown","lastname":"Wilkins","age":40,"gender":"M","address":"686 Delmonico Place","employer":"Medesign","email":"brownwilkins@medesign.com","city":"Shelby","state":"WY"} +{"index":{"_id":"960"}} +{"account_number":960,"balance":2905,"firstname":"Curry","lastname":"Vargas","age":40,"gender":"M","address":"242 Blake Avenue","employer":"Pearlesex","email":"curryvargas@pearlesex.com","city":"Henrietta","state":"NH"} +{"index":{"_id":"965"}} +{"account_number":965,"balance":21882,"firstname":"Patrica","lastname":"Melton","age":28,"gender":"M","address":"141 Rodney Street","employer":"Flexigen","email":"patricamelton@flexigen.com","city":"Klagetoh","state":"MD"} +{"index":{"_id":"972"}} +{"account_number":972,"balance":24719,"firstname":"Leona","lastname":"Christian","age":26,"gender":"F","address":"900 Woodpoint Road","employer":"Extrawear","email":"leonachristian@extrawear.com","city":"Roderfield","state":"MA"} +{"index":{"_id":"977"}} 
+{"account_number":977,"balance":6744,"firstname":"Rodgers","lastname":"Mccray","age":21,"gender":"F","address":"612 Duryea Place","employer":"Papricut","email":"rodgersmccray@papricut.com","city":"Marenisco","state":"MD"} +{"index":{"_id":"984"}} +{"account_number":984,"balance":1904,"firstname":"Viola","lastname":"Crawford","age":35,"gender":"F","address":"354 Linwood Street","employer":"Ginkle","email":"violacrawford@ginkle.com","city":"Witmer","state":"AR"} +{"index":{"_id":"989"}} +{"account_number":989,"balance":48622,"firstname":"Franklin","lastname":"Frank","age":38,"gender":"M","address":"270 Carlton Avenue","employer":"Shopabout","email":"franklinfrank@shopabout.com","city":"Guthrie","state":"NC"} +{"index":{"_id":"991"}} +{"account_number":991,"balance":4239,"firstname":"Connie","lastname":"Berry","age":28,"gender":"F","address":"647 Gardner Avenue","employer":"Flumbo","email":"connieberry@flumbo.com","city":"Frierson","state":"MO"} +{"index":{"_id":"996"}} +{"account_number":996,"balance":17541,"firstname":"Andrews","lastname":"Herrera","age":30,"gender":"F","address":"570 Vandam Street","employer":"Klugger","email":"andrewsherrera@klugger.com","city":"Whitehaven","state":"MN"} +{"index":{"_id":"0"}} +{"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} +{"index":{"_id":"5"}} +{"account_number":5,"balance":29342,"firstname":"Leola","lastname":"Stewart","age":30,"gender":"F","address":"311 Elm Place","employer":"Diginetic","email":"leolastewart@diginetic.com","city":"Fairview","state":"NJ"} +{"index":{"_id":"12"}} +{"account_number":12,"balance":37055,"firstname":"Stafford","lastname":"Brock","age":20,"gender":"F","address":"296 Wythe Avenue","employer":"Uncorp","email":"staffordbrock@uncorp.com","city":"Bend","state":"AL"} +{"index":{"_id":"17"}} +{"account_number":17,"balance":7831,"firstname":"Bessie","lastname":"Orr","age":31,"gender":"F","address":"239 Hinsdale Street","employer":"Skyplex","email":"bessieorr@skyplex.com","city":"Graball","state":"FL"} +{"index":{"_id":"24"}} +{"account_number":24,"balance":44182,"firstname":"Wood","lastname":"Dale","age":39,"gender":"M","address":"582 Gelston Avenue","employer":"Besto","email":"wooddale@besto.com","city":"Juntura","state":"MI"} +{"index":{"_id":"29"}} +{"account_number":29,"balance":27323,"firstname":"Leah","lastname":"Santiago","age":33,"gender":"M","address":"193 Schenck Avenue","employer":"Isologix","email":"leahsantiago@isologix.com","city":"Gerton","state":"ND"} +{"index":{"_id":"31"}} +{"account_number":31,"balance":30443,"firstname":"Kristen","lastname":"Santana","age":22,"gender":"F","address":"130 Middagh Street","employer":"Dogspa","email":"kristensantana@dogspa.com","city":"Vale","state":"MA"} +{"index":{"_id":"36"}} +{"account_number":36,"balance":15902,"firstname":"Alexandra","lastname":"Nguyen","age":39,"gender":"F","address":"389 Elizabeth Place","employer":"Bittor","email":"alexandranguyen@bittor.com","city":"Hemlock","state":"KY"} +{"index":{"_id":"43"}} +{"account_number":43,"balance":33474,"firstname":"Ryan","lastname":"Howe","age":25,"gender":"M","address":"660 Huntington Street","employer":"Microluxe","email":"ryanhowe@microluxe.com","city":"Clara","state":"CT"} +{"index":{"_id":"48"}} +{"account_number":48,"balance":40608,"firstname":"Peck","lastname":"Downs","age":39,"gender":"F","address":"594 Dwight 
Street","employer":"Ramjob","email":"peckdowns@ramjob.com","city":"Coloma","state":"WA"} +{"index":{"_id":"50"}} +{"account_number":50,"balance":43695,"firstname":"Sheena","lastname":"Kirkland","age":33,"gender":"M","address":"598 Bank Street","employer":"Zerbina","email":"sheenakirkland@zerbina.com","city":"Walland","state":"IN"} +{"index":{"_id":"55"}} +{"account_number":55,"balance":22020,"firstname":"Shelia","lastname":"Puckett","age":33,"gender":"M","address":"265 Royce Place","employer":"Izzby","email":"sheliapuckett@izzby.com","city":"Slovan","state":"HI"} +{"index":{"_id":"62"}} +{"account_number":62,"balance":43065,"firstname":"Lester","lastname":"Stanton","age":37,"gender":"M","address":"969 Doughty Street","employer":"Geekko","email":"lesterstanton@geekko.com","city":"Itmann","state":"DC"} +{"index":{"_id":"67"}} +{"account_number":67,"balance":39430,"firstname":"Isabelle","lastname":"Spence","age":39,"gender":"M","address":"718 Troy Avenue","employer":"Geeketron","email":"isabellespence@geeketron.com","city":"Camptown","state":"WA"} +{"index":{"_id":"74"}} +{"account_number":74,"balance":47167,"firstname":"Lauri","lastname":"Saunders","age":38,"gender":"F","address":"768 Lynch Street","employer":"Securia","email":"laurisaunders@securia.com","city":"Caroline","state":"TN"} +{"index":{"_id":"79"}} +{"account_number":79,"balance":28185,"firstname":"Booker","lastname":"Lowery","age":29,"gender":"M","address":"817 Campus Road","employer":"Sensate","email":"bookerlowery@sensate.com","city":"Carlos","state":"MT"} +{"index":{"_id":"81"}} +{"account_number":81,"balance":46568,"firstname":"Dennis","lastname":"Gilbert","age":40,"gender":"M","address":"619 Minna Street","employer":"Melbacor","email":"dennisgilbert@melbacor.com","city":"Kersey","state":"ND"} +{"index":{"_id":"86"}} +{"account_number":86,"balance":15428,"firstname":"Walton","lastname":"Butler","age":36,"gender":"M","address":"999 Schenck Street","employer":"Unisure","email":"waltonbutler@unisure.com","city":"Bentonville","state":"IL"} +{"index":{"_id":"93"}} +{"account_number":93,"balance":17728,"firstname":"Jeri","lastname":"Booth","age":31,"gender":"M","address":"322 Roosevelt Court","employer":"Geekology","email":"jeribooth@geekology.com","city":"Leming","state":"ND"} +{"index":{"_id":"98"}} +{"account_number":98,"balance":15085,"firstname":"Cora","lastname":"Barrett","age":24,"gender":"F","address":"555 Neptune Court","employer":"Kiosk","email":"corabarrett@kiosk.com","city":"Independence","state":"MN"} +{"index":{"_id":"101"}} +{"account_number":101,"balance":43400,"firstname":"Cecelia","lastname":"Grimes","age":31,"gender":"M","address":"972 Lincoln Place","employer":"Ecosys","email":"ceceliagrimes@ecosys.com","city":"Manchester","state":"AR"} +{"index":{"_id":"106"}} +{"account_number":106,"balance":8212,"firstname":"Josefina","lastname":"Wagner","age":36,"gender":"M","address":"418 Estate Road","employer":"Kyaguru","email":"josefinawagner@kyaguru.com","city":"Darbydale","state":"FL"} +{"index":{"_id":"113"}} +{"account_number":113,"balance":41652,"firstname":"Burt","lastname":"Moses","age":27,"gender":"M","address":"633 Berry Street","employer":"Uni","email":"burtmoses@uni.com","city":"Russellville","state":"CT"} +{"index":{"_id":"118"}} +{"account_number":118,"balance":2223,"firstname":"Ballard","lastname":"Vasquez","age":33,"gender":"F","address":"101 Bush Street","employer":"Intergeek","email":"ballardvasquez@intergeek.com","city":"Century","state":"MN"} +{"index":{"_id":"120"}} 
+{"account_number":120,"balance":38565,"firstname":"Browning","lastname":"Rodriquez","age":33,"gender":"M","address":"910 Moore Street","employer":"Opportech","email":"browningrodriquez@opportech.com","city":"Cutter","state":"ND"} +{"index":{"_id":"125"}} +{"account_number":125,"balance":5396,"firstname":"Tanisha","lastname":"Dixon","age":30,"gender":"M","address":"482 Hancock Street","employer":"Junipoor","email":"tanishadixon@junipoor.com","city":"Wauhillau","state":"IA"} +{"index":{"_id":"132"}} +{"account_number":132,"balance":37707,"firstname":"Horton","lastname":"Romero","age":35,"gender":"M","address":"427 Rutherford Place","employer":"Affluex","email":"hortonromero@affluex.com","city":"Hall","state":"AK"} +{"index":{"_id":"137"}} +{"account_number":137,"balance":3596,"firstname":"Frost","lastname":"Freeman","age":29,"gender":"F","address":"191 Dennett Place","employer":"Beadzza","email":"frostfreeman@beadzza.com","city":"Sabillasville","state":"HI"} +{"index":{"_id":"144"}} +{"account_number":144,"balance":43257,"firstname":"Evans","lastname":"Dyer","age":30,"gender":"F","address":"912 Post Court","employer":"Magmina","email":"evansdyer@magmina.com","city":"Gordon","state":"HI"} +{"index":{"_id":"149"}} +{"account_number":149,"balance":22994,"firstname":"Megan","lastname":"Gonzales","age":21,"gender":"M","address":"836 Tampa Court","employer":"Andershun","email":"megangonzales@andershun.com","city":"Rockhill","state":"AL"} +{"index":{"_id":"151"}} +{"account_number":151,"balance":34473,"firstname":"Kent","lastname":"Joyner","age":20,"gender":"F","address":"799 Truxton Street","employer":"Kozgene","email":"kentjoyner@kozgene.com","city":"Allamuchy","state":"DC"} +{"index":{"_id":"156"}} +{"account_number":156,"balance":40185,"firstname":"Sloan","lastname":"Pennington","age":24,"gender":"F","address":"573 Opal Court","employer":"Hopeli","email":"sloanpennington@hopeli.com","city":"Evergreen","state":"CT"} +{"index":{"_id":"163"}} +{"account_number":163,"balance":43075,"firstname":"Wilda","lastname":"Norman","age":33,"gender":"F","address":"173 Beadel Street","employer":"Kog","email":"wildanorman@kog.com","city":"Bodega","state":"ME"} +{"index":{"_id":"168"}} +{"account_number":168,"balance":49568,"firstname":"Carissa","lastname":"Simon","age":20,"gender":"M","address":"975 Flatbush Avenue","employer":"Zillacom","email":"carissasimon@zillacom.com","city":"Neibert","state":"IL"} +{"index":{"_id":"170"}} +{"account_number":170,"balance":6025,"firstname":"Mann","lastname":"Madden","age":36,"gender":"F","address":"161 Radde Place","employer":"Farmex","email":"mannmadden@farmex.com","city":"Thermal","state":"LA"} +{"index":{"_id":"175"}} +{"account_number":175,"balance":16213,"firstname":"Montoya","lastname":"Donaldson","age":28,"gender":"F","address":"481 Morton Street","employer":"Envire","email":"montoyadonaldson@envire.com","city":"Delco","state":"MA"} +{"index":{"_id":"182"}} +{"account_number":182,"balance":7803,"firstname":"Manuela","lastname":"Dillon","age":21,"gender":"M","address":"742 Garnet Street","employer":"Moreganic","email":"manueladillon@moreganic.com","city":"Ilchester","state":"TX"} +{"index":{"_id":"187"}} +{"account_number":187,"balance":26581,"firstname":"Autumn","lastname":"Hodges","age":35,"gender":"M","address":"757 Granite Street","employer":"Ezentia","email":"autumnhodges@ezentia.com","city":"Martinsville","state":"KY"} +{"index":{"_id":"194"}} +{"account_number":194,"balance":16311,"firstname":"Beck","lastname":"Rosario","age":39,"gender":"M","address":"721 
Cambridge Place","employer":"Zoid","email":"beckrosario@zoid.com","city":"Efland","state":"ID"} +{"index":{"_id":"199"}} +{"account_number":199,"balance":18086,"firstname":"Branch","lastname":"Love","age":26,"gender":"M","address":"458 Commercial Street","employer":"Frolix","email":"branchlove@frolix.com","city":"Caspar","state":"NC"} +{"index":{"_id":"202"}} +{"account_number":202,"balance":26466,"firstname":"Medina","lastname":"Brown","age":31,"gender":"F","address":"519 Sunnyside Court","employer":"Bleendot","email":"medinabrown@bleendot.com","city":"Winfred","state":"MI"} +{"index":{"_id":"207"}} +{"account_number":207,"balance":45535,"firstname":"Evelyn","lastname":"Lara","age":35,"gender":"F","address":"636 Chestnut Street","employer":"Ultrasure","email":"evelynlara@ultrasure.com","city":"Logan","state":"MI"} +{"index":{"_id":"214"}} +{"account_number":214,"balance":24418,"firstname":"Luann","lastname":"Faulkner","age":37,"gender":"F","address":"697 Hazel Court","employer":"Zolar","email":"luannfaulkner@zolar.com","city":"Ticonderoga","state":"TX"} +{"index":{"_id":"219"}} +{"account_number":219,"balance":17127,"firstname":"Edwards","lastname":"Hurley","age":25,"gender":"M","address":"834 Stockholm Street","employer":"Austech","email":"edwardshurley@austech.com","city":"Bayview","state":"NV"} +{"index":{"_id":"221"}} +{"account_number":221,"balance":15803,"firstname":"Benjamin","lastname":"Barrera","age":34,"gender":"M","address":"568 Main Street","employer":"Zaphire","email":"benjaminbarrera@zaphire.com","city":"Germanton","state":"WY"} +{"index":{"_id":"226"}} +{"account_number":226,"balance":37720,"firstname":"Wilkins","lastname":"Brady","age":40,"gender":"F","address":"486 Baltic Street","employer":"Dogtown","email":"wilkinsbrady@dogtown.com","city":"Condon","state":"MT"} +{"index":{"_id":"233"}} +{"account_number":233,"balance":23020,"firstname":"Washington","lastname":"Walsh","age":27,"gender":"M","address":"366 Church Avenue","employer":"Candecor","email":"washingtonwalsh@candecor.com","city":"Westphalia","state":"MA"} +{"index":{"_id":"238"}} +{"account_number":238,"balance":21287,"firstname":"Constance","lastname":"Wong","age":28,"gender":"M","address":"496 Brown Street","employer":"Grainspot","email":"constancewong@grainspot.com","city":"Cecilia","state":"IN"} +{"index":{"_id":"240"}} +{"account_number":240,"balance":49741,"firstname":"Oconnor","lastname":"Clay","age":35,"gender":"F","address":"659 Highland Boulevard","employer":"Franscene","email":"oconnorclay@franscene.com","city":"Kilbourne","state":"NH"} +{"index":{"_id":"245"}} +{"account_number":245,"balance":22026,"firstname":"Fran","lastname":"Bolton","age":28,"gender":"F","address":"147 Jerome Street","employer":"Solaren","email":"franbolton@solaren.com","city":"Nash","state":"RI"} +{"index":{"_id":"252"}} +{"account_number":252,"balance":18831,"firstname":"Elvia","lastname":"Poole","age":22,"gender":"F","address":"836 Delevan Street","employer":"Velity","email":"elviapoole@velity.com","city":"Groveville","state":"MI"} +{"index":{"_id":"257"}} +{"account_number":257,"balance":5318,"firstname":"Olive","lastname":"Oneil","age":35,"gender":"F","address":"457 Decatur Street","employer":"Helixo","email":"oliveoneil@helixo.com","city":"Chicopee","state":"MI"} +{"index":{"_id":"264"}} +{"account_number":264,"balance":22084,"firstname":"Samantha","lastname":"Ferrell","age":35,"gender":"F","address":"488 Fulton Street","employer":"Flum","email":"samanthaferrell@flum.com","city":"Brandywine","state":"MT"} 
+{"index":{"_id":"269"}} +{"account_number":269,"balance":43317,"firstname":"Crosby","lastname":"Figueroa","age":34,"gender":"M","address":"910 Aurelia Court","employer":"Pyramia","email":"crosbyfigueroa@pyramia.com","city":"Leyner","state":"OH"} +{"index":{"_id":"271"}} +{"account_number":271,"balance":11864,"firstname":"Holt","lastname":"Walter","age":30,"gender":"F","address":"645 Poplar Avenue","employer":"Grupoli","email":"holtwalter@grupoli.com","city":"Mansfield","state":"OR"} +{"index":{"_id":"276"}} +{"account_number":276,"balance":11606,"firstname":"Pittman","lastname":"Mathis","age":23,"gender":"F","address":"567 Charles Place","employer":"Zuvy","email":"pittmanmathis@zuvy.com","city":"Roeville","state":"KY"} +{"index":{"_id":"283"}} +{"account_number":283,"balance":24070,"firstname":"Fuentes","lastname":"Foley","age":30,"gender":"M","address":"729 Walker Court","employer":"Knowlysis","email":"fuentesfoley@knowlysis.com","city":"Tryon","state":"TN"} +{"index":{"_id":"288"}} +{"account_number":288,"balance":27243,"firstname":"Wong","lastname":"Stone","age":39,"gender":"F","address":"440 Willoughby Street","employer":"Zentix","email":"wongstone@zentix.com","city":"Wheatfields","state":"DC"} +{"index":{"_id":"290"}} +{"account_number":290,"balance":26103,"firstname":"Neva","lastname":"Burgess","age":37,"gender":"F","address":"985 Wyona Street","employer":"Slofast","email":"nevaburgess@slofast.com","city":"Cawood","state":"DC"} +{"index":{"_id":"295"}} +{"account_number":295,"balance":37358,"firstname":"Howe","lastname":"Nash","age":20,"gender":"M","address":"833 Union Avenue","employer":"Aquacine","email":"howenash@aquacine.com","city":"Indio","state":"MN"} +{"index":{"_id":"303"}} +{"account_number":303,"balance":21976,"firstname":"Huffman","lastname":"Green","age":24,"gender":"F","address":"455 Colby Court","employer":"Comtest","email":"huffmangreen@comtest.com","city":"Weeksville","state":"UT"} +{"index":{"_id":"308"}} +{"account_number":308,"balance":33989,"firstname":"Glass","lastname":"Schroeder","age":25,"gender":"F","address":"670 Veterans Avenue","employer":"Realmo","email":"glassschroeder@realmo.com","city":"Gratton","state":"NY"} +{"index":{"_id":"310"}} +{"account_number":310,"balance":23049,"firstname":"Shannon","lastname":"Morton","age":39,"gender":"F","address":"412 Pleasant Place","employer":"Ovation","email":"shannonmorton@ovation.com","city":"Edgar","state":"AZ"} +{"index":{"_id":"315"}} +{"account_number":315,"balance":1314,"firstname":"Clare","lastname":"Morrow","age":33,"gender":"F","address":"728 Madeline Court","employer":"Gaptec","email":"claremorrow@gaptec.com","city":"Mapletown","state":"PA"} +{"index":{"_id":"322"}} +{"account_number":322,"balance":6303,"firstname":"Gilliam","lastname":"Horne","age":27,"gender":"M","address":"414 Florence Avenue","employer":"Shepard","email":"gilliamhorne@shepard.com","city":"Winesburg","state":"WY"} +{"index":{"_id":"327"}} +{"account_number":327,"balance":29294,"firstname":"Nell","lastname":"Contreras","age":27,"gender":"M","address":"694 Gold Street","employer":"Momentia","email":"nellcontreras@momentia.com","city":"Cumminsville","state":"AL"} +{"index":{"_id":"334"}} +{"account_number":334,"balance":9178,"firstname":"Cross","lastname":"Floyd","age":21,"gender":"F","address":"815 Herkimer Court","employer":"Maroptic","email":"crossfloyd@maroptic.com","city":"Kraemer","state":"AK"} +{"index":{"_id":"339"}} +{"account_number":339,"balance":3992,"firstname":"Franco","lastname":"Welch","age":38,"gender":"F","address":"776 
Brightwater Court","employer":"Earthplex","email":"francowelch@earthplex.com","city":"Naomi","state":"ME"} +{"index":{"_id":"341"}} +{"account_number":341,"balance":44367,"firstname":"Alberta","lastname":"Bradford","age":30,"gender":"F","address":"670 Grant Avenue","employer":"Bugsall","email":"albertabradford@bugsall.com","city":"Romeville","state":"MT"} +{"index":{"_id":"346"}} +{"account_number":346,"balance":26594,"firstname":"Shelby","lastname":"Sanchez","age":36,"gender":"F","address":"257 Fillmore Avenue","employer":"Geekus","email":"shelbysanchez@geekus.com","city":"Seymour","state":"CO"} +{"index":{"_id":"353"}} +{"account_number":353,"balance":45182,"firstname":"Rivera","lastname":"Sherman","age":37,"gender":"M","address":"603 Garden Place","employer":"Bovis","email":"riverasherman@bovis.com","city":"Otranto","state":"CA"} +{"index":{"_id":"358"}} +{"account_number":358,"balance":44043,"firstname":"Hale","lastname":"Baldwin","age":40,"gender":"F","address":"845 Menahan Street","employer":"Kidgrease","email":"halebaldwin@kidgrease.com","city":"Day","state":"AK"} +{"index":{"_id":"360"}} +{"account_number":360,"balance":26651,"firstname":"Ward","lastname":"Hicks","age":34,"gender":"F","address":"592 Brighton Court","employer":"Biotica","email":"wardhicks@biotica.com","city":"Kanauga","state":"VT"} +{"index":{"_id":"365"}} +{"account_number":365,"balance":3176,"firstname":"Sanders","lastname":"Holder","age":31,"gender":"F","address":"453 Cypress Court","employer":"Geekola","email":"sandersholder@geekola.com","city":"Staples","state":"TN"} +{"index":{"_id":"372"}} +{"account_number":372,"balance":28566,"firstname":"Alba","lastname":"Forbes","age":24,"gender":"M","address":"814 Meserole Avenue","employer":"Isostream","email":"albaforbes@isostream.com","city":"Clarence","state":"OR"} +{"index":{"_id":"377"}} +{"account_number":377,"balance":5374,"firstname":"Margo","lastname":"Gay","age":34,"gender":"F","address":"613 Chase Court","employer":"Rotodyne","email":"margogay@rotodyne.com","city":"Waumandee","state":"KS"} +{"index":{"_id":"384"}} +{"account_number":384,"balance":48758,"firstname":"Sallie","lastname":"Houston","age":31,"gender":"F","address":"836 Polar Street","employer":"Squish","email":"salliehouston@squish.com","city":"Morningside","state":"NC"} +{"index":{"_id":"389"}} +{"account_number":389,"balance":8839,"firstname":"York","lastname":"Cummings","age":27,"gender":"M","address":"778 Centre Street","employer":"Insurity","email":"yorkcummings@insurity.com","city":"Freeburn","state":"RI"} +{"index":{"_id":"391"}} +{"account_number":391,"balance":14733,"firstname":"Holman","lastname":"Jordan","age":30,"gender":"M","address":"391 Forrest Street","employer":"Maineland","email":"holmanjordan@maineland.com","city":"Cade","state":"CT"} +{"index":{"_id":"396"}} +{"account_number":396,"balance":14613,"firstname":"Marsha","lastname":"Elliott","age":38,"gender":"F","address":"297 Liberty Avenue","employer":"Orbiflex","email":"marshaelliott@orbiflex.com","city":"Windsor","state":"TX"} +{"index":{"_id":"404"}} +{"account_number":404,"balance":34978,"firstname":"Massey","lastname":"Becker","age":26,"gender":"F","address":"930 Pitkin Avenue","employer":"Genekom","email":"masseybecker@genekom.com","city":"Blairstown","state":"OR"} +{"index":{"_id":"409"}} +{"account_number":409,"balance":36960,"firstname":"Maura","lastname":"Glenn","age":31,"gender":"M","address":"183 Poly Place","employer":"Viagreat","email":"mauraglenn@viagreat.com","city":"Foscoe","state":"DE"} +{"index":{"_id":"411"}} 
+{"account_number":411,"balance":1172,"firstname":"Guzman","lastname":"Whitfield","age":22,"gender":"M","address":"181 Perry Terrace","employer":"Springbee","email":"guzmanwhitfield@springbee.com","city":"Balm","state":"IN"} +{"index":{"_id":"416"}} +{"account_number":416,"balance":27169,"firstname":"Hunt","lastname":"Schwartz","age":28,"gender":"F","address":"461 Havens Place","employer":"Danja","email":"huntschwartz@danja.com","city":"Grenelefe","state":"NV"} +{"index":{"_id":"423"}} +{"account_number":423,"balance":38852,"firstname":"Hines","lastname":"Underwood","age":21,"gender":"F","address":"284 Louise Terrace","employer":"Namegen","email":"hinesunderwood@namegen.com","city":"Downsville","state":"CO"} +{"index":{"_id":"428"}} +{"account_number":428,"balance":13925,"firstname":"Stephens","lastname":"Cain","age":20,"gender":"F","address":"189 Summit Street","employer":"Rocklogic","email":"stephenscain@rocklogic.com","city":"Bourg","state":"HI"} +{"index":{"_id":"430"}} +{"account_number":430,"balance":15251,"firstname":"Alejandra","lastname":"Chavez","age":34,"gender":"M","address":"651 Butler Place","employer":"Gology","email":"alejandrachavez@gology.com","city":"Allensworth","state":"VT"} +{"index":{"_id":"435"}} +{"account_number":435,"balance":14654,"firstname":"Sue","lastname":"Lopez","age":22,"gender":"F","address":"632 Stone Avenue","employer":"Emergent","email":"suelopez@emergent.com","city":"Waterford","state":"TN"} +{"index":{"_id":"442"}} +{"account_number":442,"balance":36211,"firstname":"Lawanda","lastname":"Leon","age":27,"gender":"F","address":"126 Canal Avenue","employer":"Xixan","email":"lawandaleon@xixan.com","city":"Berwind","state":"TN"} +{"index":{"_id":"447"}} +{"account_number":447,"balance":11402,"firstname":"Lucia","lastname":"Livingston","age":35,"gender":"M","address":"773 Lake Avenue","employer":"Soprano","email":"lucialivingston@soprano.com","city":"Edgewater","state":"TN"} +{"index":{"_id":"454"}} +{"account_number":454,"balance":31687,"firstname":"Alicia","lastname":"Rollins","age":22,"gender":"F","address":"483 Verona Place","employer":"Boilcat","email":"aliciarollins@boilcat.com","city":"Lutsen","state":"MD"} +{"index":{"_id":"459"}} +{"account_number":459,"balance":18869,"firstname":"Pamela","lastname":"Henry","age":20,"gender":"F","address":"361 Locust Avenue","employer":"Imageflow","email":"pamelahenry@imageflow.com","city":"Greenfields","state":"OH"} +{"index":{"_id":"461"}} +{"account_number":461,"balance":38807,"firstname":"Mcbride","lastname":"Padilla","age":34,"gender":"F","address":"550 Borinquen Pl","employer":"Zepitope","email":"mcbridepadilla@zepitope.com","city":"Emory","state":"AZ"} +{"index":{"_id":"466"}} +{"account_number":466,"balance":25109,"firstname":"Marcie","lastname":"Mcmillan","age":30,"gender":"F","address":"947 Gain Court","employer":"Entroflex","email":"marciemcmillan@entroflex.com","city":"Ronco","state":"ND"} +{"index":{"_id":"473"}} +{"account_number":473,"balance":5391,"firstname":"Susan","lastname":"Luna","age":25,"gender":"F","address":"521 Bogart Street","employer":"Zaya","email":"susanluna@zaya.com","city":"Grazierville","state":"MI"} +{"index":{"_id":"478"}} +{"account_number":478,"balance":28044,"firstname":"Dana","lastname":"Decker","age":35,"gender":"M","address":"627 Dobbin Street","employer":"Acrodance","email":"danadecker@acrodance.com","city":"Sharon","state":"MN"} +{"index":{"_id":"480"}} +{"account_number":480,"balance":40807,"firstname":"Anastasia","lastname":"Parker","age":24,"gender":"M","address":"650 
Folsom Place","employer":"Zilladyne","email":"anastasiaparker@zilladyne.com","city":"Oberlin","state":"WY"} +{"index":{"_id":"485"}} +{"account_number":485,"balance":44235,"firstname":"Albert","lastname":"Roberts","age":40,"gender":"M","address":"385 Harman Street","employer":"Stralum","email":"albertroberts@stralum.com","city":"Watrous","state":"NM"} +{"index":{"_id":"492"}} +{"account_number":492,"balance":31055,"firstname":"Burnett","lastname":"Briggs","age":35,"gender":"M","address":"987 Cass Place","employer":"Pharmex","email":"burnettbriggs@pharmex.com","city":"Cornfields","state":"TX"} +{"index":{"_id":"497"}} +{"account_number":497,"balance":13493,"firstname":"Doyle","lastname":"Jenkins","age":30,"gender":"M","address":"205 Nevins Street","employer":"Unia","email":"doylejenkins@unia.com","city":"Nicut","state":"DC"} +{"index":{"_id":"500"}} +{"account_number":500,"balance":39143,"firstname":"Pope","lastname":"Keith","age":28,"gender":"F","address":"537 Fane Court","employer":"Zboo","email":"popekeith@zboo.com","city":"Courtland","state":"AL"} +{"index":{"_id":"505"}} +{"account_number":505,"balance":45493,"firstname":"Shelley","lastname":"Webb","age":29,"gender":"M","address":"873 Crawford Avenue","employer":"Quadeebo","email":"shelleywebb@quadeebo.com","city":"Topanga","state":"IL"} +{"index":{"_id":"512"}} +{"account_number":512,"balance":47432,"firstname":"Alisha","lastname":"Morales","age":29,"gender":"M","address":"623 Batchelder Street","employer":"Terragen","email":"alishamorales@terragen.com","city":"Gilmore","state":"VA"} +{"index":{"_id":"517"}} +{"account_number":517,"balance":3022,"firstname":"Allyson","lastname":"Walls","age":38,"gender":"F","address":"334 Coffey Street","employer":"Gorganic","email":"allysonwalls@gorganic.com","city":"Dahlen","state":"GA"} +{"index":{"_id":"524"}} +{"account_number":524,"balance":49334,"firstname":"Salas","lastname":"Farley","age":30,"gender":"F","address":"499 Trucklemans Lane","employer":"Xumonk","email":"salasfarley@xumonk.com","city":"Noxen","state":"AL"} +{"index":{"_id":"529"}} +{"account_number":529,"balance":21788,"firstname":"Deann","lastname":"Fisher","age":23,"gender":"F","address":"511 Buffalo Avenue","employer":"Twiist","email":"deannfisher@twiist.com","city":"Templeton","state":"WA"} +{"index":{"_id":"531"}} +{"account_number":531,"balance":39770,"firstname":"Janet","lastname":"Pena","age":38,"gender":"M","address":"645 Livonia Avenue","employer":"Corecom","email":"janetpena@corecom.com","city":"Garberville","state":"OK"} +{"index":{"_id":"536"}} +{"account_number":536,"balance":6255,"firstname":"Emma","lastname":"Adkins","age":33,"gender":"F","address":"971 Calder Place","employer":"Ontagene","email":"emmaadkins@ontagene.com","city":"Ruckersville","state":"GA"} +{"index":{"_id":"543"}} +{"account_number":543,"balance":48022,"firstname":"Marina","lastname":"Rasmussen","age":31,"gender":"M","address":"446 Love Lane","employer":"Crustatia","email":"marinarasmussen@crustatia.com","city":"Statenville","state":"MD"} +{"index":{"_id":"548"}} +{"account_number":548,"balance":36930,"firstname":"Sandra","lastname":"Andrews","age":37,"gender":"M","address":"973 Prospect Street","employer":"Datagene","email":"sandraandrews@datagene.com","city":"Inkerman","state":"MO"} +{"index":{"_id":"550"}} +{"account_number":550,"balance":32238,"firstname":"Walsh","lastname":"Goodwin","age":22,"gender":"M","address":"953 Canda Avenue","employer":"Proflex","email":"walshgoodwin@proflex.com","city":"Ypsilanti","state":"MT"} 
+{"index":{"_id":"555"}} +{"account_number":555,"balance":10750,"firstname":"Fannie","lastname":"Slater","age":31,"gender":"M","address":"457 Tech Place","employer":"Kineticut","email":"fannieslater@kineticut.com","city":"Basye","state":"MO"} +{"index":{"_id":"562"}} +{"account_number":562,"balance":10737,"firstname":"Sarah","lastname":"Strong","age":39,"gender":"F","address":"177 Pioneer Street","employer":"Megall","email":"sarahstrong@megall.com","city":"Ladera","state":"WY"} +{"index":{"_id":"567"}} +{"account_number":567,"balance":6507,"firstname":"Diana","lastname":"Dominguez","age":40,"gender":"M","address":"419 Albany Avenue","employer":"Ohmnet","email":"dianadominguez@ohmnet.com","city":"Wildwood","state":"TX"} +{"index":{"_id":"574"}} +{"account_number":574,"balance":32954,"firstname":"Andrea","lastname":"Mosley","age":24,"gender":"M","address":"368 Throop Avenue","employer":"Musix","email":"andreamosley@musix.com","city":"Blende","state":"DC"} +{"index":{"_id":"579"}} +{"account_number":579,"balance":12044,"firstname":"Banks","lastname":"Sawyer","age":36,"gender":"M","address":"652 Doone Court","employer":"Rooforia","email":"bankssawyer@rooforia.com","city":"Foxworth","state":"ND"} +{"index":{"_id":"581"}} +{"account_number":581,"balance":16525,"firstname":"Fuller","lastname":"Mcintyre","age":32,"gender":"M","address":"169 Bergen Place","employer":"Applideck","email":"fullermcintyre@applideck.com","city":"Kenvil","state":"NY"} +{"index":{"_id":"586"}} +{"account_number":586,"balance":13644,"firstname":"Love","lastname":"Velasquez","age":26,"gender":"F","address":"290 Girard Street","employer":"Zomboid","email":"lovevelasquez@zomboid.com","city":"Villarreal","state":"SD"} +{"index":{"_id":"593"}} +{"account_number":593,"balance":41230,"firstname":"Muriel","lastname":"Vazquez","age":37,"gender":"M","address":"395 Montgomery Street","employer":"Sustenza","email":"murielvazquez@sustenza.com","city":"Strykersville","state":"OK"} +{"index":{"_id":"598"}} +{"account_number":598,"balance":33251,"firstname":"Morgan","lastname":"Coleman","age":33,"gender":"M","address":"324 McClancy Place","employer":"Aclima","email":"morgancoleman@aclima.com","city":"Bowden","state":"WA"} +{"index":{"_id":"601"}} +{"account_number":601,"balance":20796,"firstname":"Vickie","lastname":"Valentine","age":34,"gender":"F","address":"432 Bassett Avenue","employer":"Comvene","email":"vickievalentine@comvene.com","city":"Teasdale","state":"UT"} +{"index":{"_id":"606"}} +{"account_number":606,"balance":28770,"firstname":"Michael","lastname":"Bray","age":31,"gender":"M","address":"935 Lake Place","employer":"Telepark","email":"michaelbray@telepark.com","city":"Lemoyne","state":"CT"} +{"index":{"_id":"613"}} +{"account_number":613,"balance":39340,"firstname":"Eddie","lastname":"Mccarty","age":34,"gender":"F","address":"971 Richards Street","employer":"Bisba","email":"eddiemccarty@bisba.com","city":"Fruitdale","state":"NY"} +{"index":{"_id":"618"}} +{"account_number":618,"balance":8976,"firstname":"Cheri","lastname":"Ford","age":30,"gender":"F","address":"803 Ridgewood Avenue","employer":"Zorromop","email":"cheriford@zorromop.com","city":"Gambrills","state":"VT"} +{"index":{"_id":"620"}} +{"account_number":620,"balance":7224,"firstname":"Coleen","lastname":"Bartlett","age":38,"gender":"M","address":"761 Carroll Street","employer":"Idealis","email":"coleenbartlett@idealis.com","city":"Mathews","state":"DE"} +{"index":{"_id":"625"}} 
+{"account_number":625,"balance":46010,"firstname":"Cynthia","lastname":"Johnston","age":23,"gender":"M","address":"142 Box Street","employer":"Zentry","email":"cynthiajohnston@zentry.com","city":"Makena","state":"MA"} +{"index":{"_id":"632"}} +{"account_number":632,"balance":40470,"firstname":"Kay","lastname":"Warren","age":20,"gender":"F","address":"422 Alabama Avenue","employer":"Realysis","email":"kaywarren@realysis.com","city":"Homestead","state":"HI"} +{"index":{"_id":"637"}} +{"account_number":637,"balance":3169,"firstname":"Kathy","lastname":"Carter","age":27,"gender":"F","address":"410 Jamison Lane","employer":"Limage","email":"kathycarter@limage.com","city":"Ernstville","state":"WA"} +{"index":{"_id":"644"}} +{"account_number":644,"balance":44021,"firstname":"Etta","lastname":"Miller","age":21,"gender":"F","address":"376 Lawton Street","employer":"Bluegrain","email":"ettamiller@bluegrain.com","city":"Baker","state":"MD"} +{"index":{"_id":"649"}} +{"account_number":649,"balance":20275,"firstname":"Jeanine","lastname":"Malone","age":26,"gender":"F","address":"114 Dodworth Street","employer":"Nixelt","email":"jeaninemalone@nixelt.com","city":"Keyport","state":"AK"} +{"index":{"_id":"651"}} +{"account_number":651,"balance":18360,"firstname":"Young","lastname":"Reeves","age":34,"gender":"M","address":"581 Plaza Street","employer":"Krog","email":"youngreeves@krog.com","city":"Sussex","state":"WY"} +{"index":{"_id":"656"}} +{"account_number":656,"balance":21632,"firstname":"Olson","lastname":"Hunt","age":36,"gender":"M","address":"342 Jaffray Street","employer":"Volax","email":"olsonhunt@volax.com","city":"Bangor","state":"WA"} +{"index":{"_id":"663"}} +{"account_number":663,"balance":2456,"firstname":"Rollins","lastname":"Richards","age":37,"gender":"M","address":"129 Sullivan Place","employer":"Geostele","email":"rollinsrichards@geostele.com","city":"Morgandale","state":"FL"} +{"index":{"_id":"668"}} +{"account_number":668,"balance":45069,"firstname":"Potter","lastname":"Michael","age":27,"gender":"M","address":"803 Glenmore Avenue","employer":"Ontality","email":"pottermichael@ontality.com","city":"Newkirk","state":"KS"} +{"index":{"_id":"670"}} +{"account_number":670,"balance":10178,"firstname":"Ollie","lastname":"Riley","age":22,"gender":"M","address":"252 Jackson Place","employer":"Adornica","email":"ollieriley@adornica.com","city":"Brethren","state":"WI"} +{"index":{"_id":"675"}} +{"account_number":675,"balance":36102,"firstname":"Fisher","lastname":"Shepard","age":27,"gender":"F","address":"859 Varick Street","employer":"Qot","email":"fishershepard@qot.com","city":"Diaperville","state":"MD"} +{"index":{"_id":"682"}} +{"account_number":682,"balance":14168,"firstname":"Anne","lastname":"Hale","age":22,"gender":"F","address":"708 Anthony Street","employer":"Cytrek","email":"annehale@cytrek.com","city":"Beechmont","state":"WV"} +{"index":{"_id":"687"}} +{"account_number":687,"balance":48630,"firstname":"Caroline","lastname":"Cox","age":31,"gender":"M","address":"626 Hillel Place","employer":"Opticon","email":"carolinecox@opticon.com","city":"Loma","state":"ND"} +{"index":{"_id":"694"}} +{"account_number":694,"balance":33125,"firstname":"Craig","lastname":"Palmer","age":31,"gender":"F","address":"273 Montrose Avenue","employer":"Comvey","email":"craigpalmer@comvey.com","city":"Cleary","state":"OK"} +{"index":{"_id":"699"}} +{"account_number":699,"balance":4156,"firstname":"Gallagher","lastname":"Marshall","age":37,"gender":"F","address":"648 Clifford 
Place","employer":"Exiand","email":"gallaghermarshall@exiand.com","city":"Belfair","state":"KY"} +{"index":{"_id":"702"}} +{"account_number":702,"balance":46490,"firstname":"Meadows","lastname":"Delgado","age":26,"gender":"M","address":"612 Jardine Place","employer":"Daisu","email":"meadowsdelgado@daisu.com","city":"Venice","state":"AR"} +{"index":{"_id":"707"}} +{"account_number":707,"balance":30325,"firstname":"Sonya","lastname":"Trevino","age":30,"gender":"F","address":"181 Irving Place","employer":"Atgen","email":"sonyatrevino@atgen.com","city":"Enetai","state":"TN"} +{"index":{"_id":"714"}} +{"account_number":714,"balance":16602,"firstname":"Socorro","lastname":"Murray","age":34,"gender":"F","address":"810 Manhattan Court","employer":"Isoswitch","email":"socorromurray@isoswitch.com","city":"Jugtown","state":"AZ"} +{"index":{"_id":"719"}} +{"account_number":719,"balance":33107,"firstname":"Leanna","lastname":"Reed","age":25,"gender":"F","address":"528 Krier Place","employer":"Rodeology","email":"leannareed@rodeology.com","city":"Carrizo","state":"WI"} +{"index":{"_id":"721"}} +{"account_number":721,"balance":32958,"firstname":"Mara","lastname":"Dickson","age":26,"gender":"M","address":"810 Harrison Avenue","employer":"Comtours","email":"maradickson@comtours.com","city":"Thynedale","state":"DE"} +{"index":{"_id":"726"}} +{"account_number":726,"balance":44737,"firstname":"Rosemary","lastname":"Salazar","age":21,"gender":"M","address":"290 Croton Loop","employer":"Rockabye","email":"rosemarysalazar@rockabye.com","city":"Helen","state":"IA"} +{"index":{"_id":"733"}} +{"account_number":733,"balance":15722,"firstname":"Lakeisha","lastname":"Mccarthy","age":37,"gender":"M","address":"782 Turnbull Avenue","employer":"Exosis","email":"lakeishamccarthy@exosis.com","city":"Caberfae","state":"NM"} +{"index":{"_id":"738"}} +{"account_number":738,"balance":44936,"firstname":"Rosalind","lastname":"Hunter","age":32,"gender":"M","address":"644 Eaton Court","employer":"Zolarity","email":"rosalindhunter@zolarity.com","city":"Cataract","state":"SD"} +{"index":{"_id":"740"}} +{"account_number":740,"balance":6143,"firstname":"Chambers","lastname":"Hahn","age":22,"gender":"M","address":"937 Windsor Place","employer":"Medalert","email":"chambershahn@medalert.com","city":"Dorneyville","state":"DC"} +{"index":{"_id":"745"}} +{"account_number":745,"balance":4572,"firstname":"Jacobs","lastname":"Sweeney","age":32,"gender":"M","address":"189 Lott Place","employer":"Comtent","email":"jacobssweeney@comtent.com","city":"Advance","state":"NJ"} +{"index":{"_id":"752"}} +{"account_number":752,"balance":14039,"firstname":"Jerry","lastname":"Rush","age":31,"gender":"M","address":"632 Dank Court","employer":"Ebidco","email":"jerryrush@ebidco.com","city":"Geyserville","state":"AR"} +{"index":{"_id":"757"}} +{"account_number":757,"balance":34628,"firstname":"Mccullough","lastname":"Moore","age":30,"gender":"F","address":"304 Hastings Street","employer":"Nikuda","email":"mcculloughmoore@nikuda.com","city":"Charco","state":"DC"} +{"index":{"_id":"764"}} +{"account_number":764,"balance":3728,"firstname":"Noemi","lastname":"Gill","age":30,"gender":"M","address":"427 Chester Street","employer":"Avit","email":"noemigill@avit.com","city":"Chesterfield","state":"AL"} +{"index":{"_id":"769"}} +{"account_number":769,"balance":15362,"firstname":"Francis","lastname":"Beck","age":28,"gender":"M","address":"454 Livingston Street","employer":"Furnafix","email":"francisbeck@furnafix.com","city":"Dunnavant","state":"HI"} 
+{"index":{"_id":"771"}} +{"account_number":771,"balance":32784,"firstname":"Jocelyn","lastname":"Boone","age":23,"gender":"M","address":"513 Division Avenue","employer":"Collaire","email":"jocelynboone@collaire.com","city":"Lisco","state":"VT"} +{"index":{"_id":"776"}} +{"account_number":776,"balance":29177,"firstname":"Duke","lastname":"Atkinson","age":24,"gender":"M","address":"520 Doscher Street","employer":"Tripsch","email":"dukeatkinson@tripsch.com","city":"Lafferty","state":"NC"} +{"index":{"_id":"783"}} +{"account_number":783,"balance":11911,"firstname":"Faith","lastname":"Cooper","age":25,"gender":"F","address":"539 Rapelye Street","employer":"Insuron","email":"faithcooper@insuron.com","city":"Jennings","state":"MN"} +{"index":{"_id":"788"}} +{"account_number":788,"balance":12473,"firstname":"Marianne","lastname":"Aguilar","age":39,"gender":"F","address":"213 Holly Street","employer":"Marqet","email":"marianneaguilar@marqet.com","city":"Alfarata","state":"HI"} +{"index":{"_id":"790"}} +{"account_number":790,"balance":29912,"firstname":"Ellis","lastname":"Sullivan","age":39,"gender":"F","address":"877 Coyle Street","employer":"Enersave","email":"ellissullivan@enersave.com","city":"Canby","state":"MS"} +{"index":{"_id":"795"}} +{"account_number":795,"balance":31450,"firstname":"Bruce","lastname":"Avila","age":34,"gender":"M","address":"865 Newkirk Placez","employer":"Plasmosis","email":"bruceavila@plasmosis.com","city":"Ada","state":"ID"} +{"index":{"_id":"803"}} +{"account_number":803,"balance":49567,"firstname":"Marissa","lastname":"Spears","age":25,"gender":"M","address":"963 Highland Avenue","employer":"Centregy","email":"marissaspears@centregy.com","city":"Bloomington","state":"MS"} +{"index":{"_id":"808"}} +{"account_number":808,"balance":11251,"firstname":"Nola","lastname":"Quinn","age":20,"gender":"M","address":"863 Wythe Place","employer":"Iplax","email":"nolaquinn@iplax.com","city":"Cuylerville","state":"NH"} +{"index":{"_id":"810"}} +{"account_number":810,"balance":10563,"firstname":"Alyssa","lastname":"Ortega","age":40,"gender":"M","address":"977 Clymer Street","employer":"Eventage","email":"alyssaortega@eventage.com","city":"Convent","state":"SC"} +{"index":{"_id":"815"}} +{"account_number":815,"balance":19336,"firstname":"Guthrie","lastname":"Morse","age":30,"gender":"M","address":"685 Vandalia Avenue","employer":"Gronk","email":"guthriemorse@gronk.com","city":"Fowlerville","state":"OR"} +{"index":{"_id":"822"}} +{"account_number":822,"balance":13024,"firstname":"Hicks","lastname":"Farrell","age":25,"gender":"M","address":"468 Middleton Street","employer":"Zolarex","email":"hicksfarrell@zolarex.com","city":"Columbus","state":"OR"} +{"index":{"_id":"827"}} +{"account_number":827,"balance":37536,"firstname":"Naomi","lastname":"Ball","age":29,"gender":"F","address":"319 Stewart Street","employer":"Isotronic","email":"naomiball@isotronic.com","city":"Trona","state":"NM"} +{"index":{"_id":"834"}} +{"account_number":834,"balance":38049,"firstname":"Sybil","lastname":"Carrillo","age":25,"gender":"M","address":"359 Baughman Place","employer":"Phuel","email":"sybilcarrillo@phuel.com","city":"Kohatk","state":"CT"} +{"index":{"_id":"839"}} +{"account_number":839,"balance":38292,"firstname":"Langley","lastname":"Neal","age":39,"gender":"F","address":"565 Newton Street","employer":"Liquidoc","email":"langleyneal@liquidoc.com","city":"Osage","state":"AL"} +{"index":{"_id":"841"}} 
+{"account_number":841,"balance":28291,"firstname":"Dalton","lastname":"Waters","age":21,"gender":"M","address":"859 Grand Street","employer":"Malathion","email":"daltonwaters@malathion.com","city":"Tonopah","state":"AZ"} +{"index":{"_id":"846"}} +{"account_number":846,"balance":35099,"firstname":"Maureen","lastname":"Glass","age":22,"gender":"M","address":"140 Amherst Street","employer":"Stelaecor","email":"maureenglass@stelaecor.com","city":"Cucumber","state":"IL"} +{"index":{"_id":"853"}} +{"account_number":853,"balance":38353,"firstname":"Travis","lastname":"Parks","age":40,"gender":"M","address":"930 Bay Avenue","employer":"Pyramax","email":"travisparks@pyramax.com","city":"Gadsden","state":"ND"} +{"index":{"_id":"858"}} +{"account_number":858,"balance":23194,"firstname":"Small","lastname":"Hatfield","age":36,"gender":"M","address":"593 Tennis Court","employer":"Letpro","email":"smallhatfield@letpro.com","city":"Haena","state":"KS"} +{"index":{"_id":"860"}} +{"account_number":860,"balance":23613,"firstname":"Clark","lastname":"Boyd","age":37,"gender":"M","address":"501 Rock Street","employer":"Deepends","email":"clarkboyd@deepends.com","city":"Whitewater","state":"MA"} +{"index":{"_id":"865"}} +{"account_number":865,"balance":10574,"firstname":"Cook","lastname":"Kelley","age":28,"gender":"F","address":"865 Lincoln Terrace","employer":"Quizmo","email":"cookkelley@quizmo.com","city":"Kansas","state":"KY"} +{"index":{"_id":"872"}} +{"account_number":872,"balance":26314,"firstname":"Jane","lastname":"Greer","age":36,"gender":"F","address":"717 Hewes Street","employer":"Newcube","email":"janegreer@newcube.com","city":"Delshire","state":"DE"} +{"index":{"_id":"877"}} +{"account_number":877,"balance":42879,"firstname":"Tracey","lastname":"Ruiz","age":34,"gender":"F","address":"141 Tompkins Avenue","employer":"Waab","email":"traceyruiz@waab.com","city":"Zeba","state":"NM"} +{"index":{"_id":"884"}} +{"account_number":884,"balance":29316,"firstname":"Reva","lastname":"Rosa","age":40,"gender":"M","address":"784 Greene Avenue","employer":"Urbanshee","email":"revarosa@urbanshee.com","city":"Bakersville","state":"MS"} +{"index":{"_id":"889"}} +{"account_number":889,"balance":26464,"firstname":"Fischer","lastname":"Klein","age":38,"gender":"F","address":"948 Juliana Place","employer":"Comtext","email":"fischerklein@comtext.com","city":"Jackpot","state":"PA"} +{"index":{"_id":"891"}} +{"account_number":891,"balance":34829,"firstname":"Jacobson","lastname":"Clemons","age":24,"gender":"F","address":"507 Wilson Street","employer":"Quilm","email":"jacobsonclemons@quilm.com","city":"Muir","state":"TX"} +{"index":{"_id":"896"}} +{"account_number":896,"balance":31947,"firstname":"Buckley","lastname":"Peterson","age":26,"gender":"M","address":"217 Beayer Place","employer":"Earwax","email":"buckleypeterson@earwax.com","city":"Franklin","state":"DE"} +{"index":{"_id":"904"}} +{"account_number":904,"balance":27707,"firstname":"Mendez","lastname":"Mcneil","age":26,"gender":"M","address":"431 Halsey Street","employer":"Macronaut","email":"mendezmcneil@macronaut.com","city":"Troy","state":"OK"} +{"index":{"_id":"909"}} +{"account_number":909,"balance":18421,"firstname":"Stark","lastname":"Lewis","age":36,"gender":"M","address":"409 Tilden Avenue","employer":"Frosnex","email":"starklewis@frosnex.com","city":"Axis","state":"CA"} +{"index":{"_id":"911"}} +{"account_number":911,"balance":42655,"firstname":"Annie","lastname":"Lyons","age":21,"gender":"M","address":"518 Woods 
Place","employer":"Enerforce","email":"annielyons@enerforce.com","city":"Stagecoach","state":"MA"} +{"index":{"_id":"916"}} +{"account_number":916,"balance":47887,"firstname":"Jarvis","lastname":"Alexander","age":40,"gender":"M","address":"406 Bergen Avenue","employer":"Equitax","email":"jarvisalexander@equitax.com","city":"Haring","state":"KY"} +{"index":{"_id":"923"}} +{"account_number":923,"balance":48466,"firstname":"Mueller","lastname":"Mckee","age":26,"gender":"M","address":"298 Ruby Street","employer":"Luxuria","email":"muellermckee@luxuria.com","city":"Coleville","state":"TN"} +{"index":{"_id":"928"}} +{"account_number":928,"balance":19611,"firstname":"Hester","lastname":"Copeland","age":22,"gender":"F","address":"425 Cropsey Avenue","employer":"Dymi","email":"hestercopeland@dymi.com","city":"Wolcott","state":"NE"} +{"index":{"_id":"930"}} +{"account_number":930,"balance":47257,"firstname":"Kinney","lastname":"Lawson","age":39,"gender":"M","address":"501 Raleigh Place","employer":"Neptide","email":"kinneylawson@neptide.com","city":"Deltaville","state":"MD"} +{"index":{"_id":"935"}} +{"account_number":935,"balance":4959,"firstname":"Flowers","lastname":"Robles","age":30,"gender":"M","address":"201 Hull Street","employer":"Xelegyl","email":"flowersrobles@xelegyl.com","city":"Rehrersburg","state":"AL"} +{"index":{"_id":"942"}} +{"account_number":942,"balance":21299,"firstname":"Hamilton","lastname":"Clayton","age":26,"gender":"M","address":"413 Debevoise Street","employer":"Architax","email":"hamiltonclayton@architax.com","city":"Terlingua","state":"NM"} +{"index":{"_id":"947"}} +{"account_number":947,"balance":22039,"firstname":"Virgie","lastname":"Garza","age":30,"gender":"M","address":"903 Matthews Court","employer":"Plasmox","email":"virgiegarza@plasmox.com","city":"Somerset","state":"WY"} +{"index":{"_id":"954"}} +{"account_number":954,"balance":49404,"firstname":"Jenna","lastname":"Martin","age":22,"gender":"M","address":"688 Hart Street","employer":"Zinca","email":"jennamartin@zinca.com","city":"Oasis","state":"MD"} +{"index":{"_id":"959"}} +{"account_number":959,"balance":34743,"firstname":"Shaffer","lastname":"Cervantes","age":40,"gender":"M","address":"931 Varick Avenue","employer":"Oceanica","email":"shaffercervantes@oceanica.com","city":"Bowie","state":"AL"} +{"index":{"_id":"961"}} +{"account_number":961,"balance":43219,"firstname":"Betsy","lastname":"Hyde","age":27,"gender":"F","address":"183 Junius Street","employer":"Tubalum","email":"betsyhyde@tubalum.com","city":"Driftwood","state":"TX"} +{"index":{"_id":"966"}} +{"account_number":966,"balance":20619,"firstname":"Susanne","lastname":"Rodriguez","age":35,"gender":"F","address":"255 Knickerbocker Avenue","employer":"Comtrek","email":"susannerodriguez@comtrek.com","city":"Trinway","state":"TX"} +{"index":{"_id":"973"}} +{"account_number":973,"balance":45756,"firstname":"Rice","lastname":"Farmer","age":31,"gender":"M","address":"476 Nassau Avenue","employer":"Photobin","email":"ricefarmer@photobin.com","city":"Suitland","state":"ME"} +{"index":{"_id":"978"}} +{"account_number":978,"balance":21459,"firstname":"Melanie","lastname":"Rojas","age":33,"gender":"M","address":"991 Java Street","employer":"Kage","email":"melanierojas@kage.com","city":"Greenock","state":"VT"} +{"index":{"_id":"980"}} +{"account_number":980,"balance":42436,"firstname":"Cash","lastname":"Collier","age":33,"gender":"F","address":"999 Sapphire Street","employer":"Ceprene","email":"cashcollier@ceprene.com","city":"Glidden","state":"AK"} 
+{"index":{"_id":"985"}} +{"account_number":985,"balance":20083,"firstname":"Martin","lastname":"Gardner","age":28,"gender":"F","address":"644 Fairview Place","employer":"Golistic","email":"martingardner@golistic.com","city":"Connerton","state":"NJ"} +{"index":{"_id":"992"}} +{"account_number":992,"balance":11413,"firstname":"Kristie","lastname":"Kennedy","age":33,"gender":"F","address":"750 Hudson Avenue","employer":"Ludak","email":"kristiekennedy@ludak.com","city":"Warsaw","state":"WY"} +{"index":{"_id":"997"}} +{"account_number":997,"balance":25311,"firstname":"Combs","lastname":"Frederick","age":20,"gender":"M","address":"586 Lloyd Court","employer":"Pathways","email":"combsfrederick@pathways.com","city":"Williamson","state":"CA"} +{"index":{"_id":"3"}} +{"account_number":3,"balance":44947,"firstname":"Levine","lastname":"Burks","age":26,"gender":"F","address":"328 Wilson Avenue","employer":"Amtap","email":"levineburks@amtap.com","city":"Cochranville","state":"HI"} +{"index":{"_id":"8"}} +{"account_number":8,"balance":48868,"firstname":"Jan","lastname":"Burns","age":35,"gender":"M","address":"699 Visitation Place","employer":"Glasstep","email":"janburns@glasstep.com","city":"Wakulla","state":"AZ"} +{"index":{"_id":"10"}} +{"account_number":10,"balance":46170,"firstname":"Dominique","lastname":"Park","age":37,"gender":"F","address":"100 Gatling Place","employer":"Conjurica","email":"dominiquepark@conjurica.com","city":"Omar","state":"NJ"} +{"index":{"_id":"15"}} +{"account_number":15,"balance":43456,"firstname":"Bobbie","lastname":"Sexton","age":21,"gender":"M","address":"232 Sedgwick Place","employer":"Zytrex","email":"bobbiesexton@zytrex.com","city":"Hendersonville","state":"CA"} +{"index":{"_id":"22"}} +{"account_number":22,"balance":40283,"firstname":"Barrera","lastname":"Terrell","age":23,"gender":"F","address":"292 Orange Street","employer":"Steelfab","email":"barreraterrell@steelfab.com","city":"Bynum","state":"ME"} +{"index":{"_id":"27"}} +{"account_number":27,"balance":6176,"firstname":"Meyers","lastname":"Williamson","age":26,"gender":"F","address":"675 Henderson Walk","employer":"Plexia","email":"meyerswilliamson@plexia.com","city":"Richmond","state":"AZ"} +{"index":{"_id":"34"}} +{"account_number":34,"balance":35379,"firstname":"Ellison","lastname":"Kim","age":30,"gender":"F","address":"986 Revere Place","employer":"Signity","email":"ellisonkim@signity.com","city":"Sehili","state":"IL"} +{"index":{"_id":"39"}} +{"account_number":39,"balance":38688,"firstname":"Bowers","lastname":"Mendez","age":22,"gender":"F","address":"665 Bennet Court","employer":"Farmage","email":"bowersmendez@farmage.com","city":"Duryea","state":"PA"} +{"index":{"_id":"41"}} +{"account_number":41,"balance":36060,"firstname":"Hancock","lastname":"Holden","age":20,"gender":"M","address":"625 Gaylord Drive","employer":"Poochies","email":"hancockholden@poochies.com","city":"Alamo","state":"KS"} +{"index":{"_id":"46"}} +{"account_number":46,"balance":12351,"firstname":"Karla","lastname":"Bowman","age":23,"gender":"M","address":"554 Chapel Street","employer":"Undertap","email":"karlabowman@undertap.com","city":"Sylvanite","state":"DC"} +{"index":{"_id":"53"}} +{"account_number":53,"balance":28101,"firstname":"Kathryn","lastname":"Payne","age":29,"gender":"F","address":"467 Louis Place","employer":"Katakana","email":"kathrynpayne@katakana.com","city":"Harviell","state":"SD"} +{"index":{"_id":"58"}} +{"account_number":58,"balance":31697,"firstname":"Marva","lastname":"Cannon","age":40,"gender":"M","address":"993 
Highland Place","employer":"Comcubine","email":"marvacannon@comcubine.com","city":"Orviston","state":"MO"} +{"index":{"_id":"60"}} +{"account_number":60,"balance":45955,"firstname":"Maude","lastname":"Casey","age":31,"gender":"F","address":"566 Strauss Street","employer":"Quilch","email":"maudecasey@quilch.com","city":"Enlow","state":"GA"} +{"index":{"_id":"65"}} +{"account_number":65,"balance":23282,"firstname":"Leonor","lastname":"Pruitt","age":24,"gender":"M","address":"974 Terrace Place","employer":"Velos","email":"leonorpruitt@velos.com","city":"Devon","state":"WI"} +{"index":{"_id":"72"}} +{"account_number":72,"balance":9732,"firstname":"Barlow","lastname":"Rhodes","age":25,"gender":"F","address":"891 Clinton Avenue","employer":"Zialactic","email":"barlowrhodes@zialactic.com","city":"Echo","state":"TN"} +{"index":{"_id":"77"}} +{"account_number":77,"balance":5724,"firstname":"Byrd","lastname":"Conley","age":24,"gender":"F","address":"698 Belmont Avenue","employer":"Zidox","email":"byrdconley@zidox.com","city":"Rockbridge","state":"SC"} +{"index":{"_id":"84"}} +{"account_number":84,"balance":3001,"firstname":"Hutchinson","lastname":"Newton","age":34,"gender":"F","address":"553 Locust Street","employer":"Zaggles","email":"hutchinsonnewton@zaggles.com","city":"Snyderville","state":"DC"} +{"index":{"_id":"89"}} +{"account_number":89,"balance":13263,"firstname":"Mcdowell","lastname":"Bradley","age":28,"gender":"M","address":"960 Howard Alley","employer":"Grok","email":"mcdowellbradley@grok.com","city":"Toftrees","state":"TX"} +{"index":{"_id":"91"}} +{"account_number":91,"balance":29799,"firstname":"Vonda","lastname":"Galloway","age":20,"gender":"M","address":"988 Voorhies Avenue","employer":"Illumity","email":"vondagalloway@illumity.com","city":"Holcombe","state":"HI"} +{"index":{"_id":"96"}} +{"account_number":96,"balance":15933,"firstname":"Shirley","lastname":"Edwards","age":38,"gender":"M","address":"817 Caton Avenue","employer":"Equitox","email":"shirleyedwards@equitox.com","city":"Nelson","state":"MA"} +{"index":{"_id":"104"}} +{"account_number":104,"balance":32619,"firstname":"Casey","lastname":"Roth","age":29,"gender":"M","address":"963 Railroad Avenue","employer":"Hotcakes","email":"caseyroth@hotcakes.com","city":"Davenport","state":"OH"} +{"index":{"_id":"109"}} +{"account_number":109,"balance":25812,"firstname":"Gretchen","lastname":"Dawson","age":31,"gender":"M","address":"610 Bethel Loop","employer":"Tetak","email":"gretchendawson@tetak.com","city":"Hailesboro","state":"CO"} +{"index":{"_id":"111"}} +{"account_number":111,"balance":1481,"firstname":"Traci","lastname":"Allison","age":35,"gender":"M","address":"922 Bryant Street","employer":"Enjola","email":"traciallison@enjola.com","city":"Robinette","state":"OR"} +{"index":{"_id":"116"}} +{"account_number":116,"balance":21335,"firstname":"Hobbs","lastname":"Wright","age":24,"gender":"M","address":"965 Temple Court","employer":"Netbook","email":"hobbswright@netbook.com","city":"Strong","state":"CA"} +{"index":{"_id":"123"}} +{"account_number":123,"balance":3079,"firstname":"Cleo","lastname":"Beach","age":27,"gender":"F","address":"653 Haring Street","employer":"Proxsoft","email":"cleobeach@proxsoft.com","city":"Greensburg","state":"ME"} +{"index":{"_id":"128"}} +{"account_number":128,"balance":3556,"firstname":"Mack","lastname":"Bullock","age":34,"gender":"F","address":"462 Ingraham Street","employer":"Terascape","email":"mackbullock@terascape.com","city":"Eureka","state":"PA"} +{"index":{"_id":"130"}} 
+{"account_number":130,"balance":24171,"firstname":"Roxie","lastname":"Cantu","age":33,"gender":"M","address":"841 Catherine Street","employer":"Skybold","email":"roxiecantu@skybold.com","city":"Deputy","state":"NE"} +{"index":{"_id":"135"}} +{"account_number":135,"balance":24885,"firstname":"Stevenson","lastname":"Crosby","age":40,"gender":"F","address":"473 Boardwalk ","employer":"Accel","email":"stevensoncrosby@accel.com","city":"Norris","state":"OK"} +{"index":{"_id":"142"}} +{"account_number":142,"balance":4544,"firstname":"Vang","lastname":"Hughes","age":27,"gender":"M","address":"357 Landis Court","employer":"Bolax","email":"vanghughes@bolax.com","city":"Emerald","state":"WY"} +{"index":{"_id":"147"}} +{"account_number":147,"balance":35921,"firstname":"Charmaine","lastname":"Whitney","age":28,"gender":"F","address":"484 Seton Place","employer":"Comveyer","email":"charmainewhitney@comveyer.com","city":"Dexter","state":"DC"} +{"index":{"_id":"154"}} +{"account_number":154,"balance":40945,"firstname":"Burns","lastname":"Solis","age":31,"gender":"M","address":"274 Lorraine Street","employer":"Rodemco","email":"burnssolis@rodemco.com","city":"Ballico","state":"WI"} +{"index":{"_id":"159"}} +{"account_number":159,"balance":1696,"firstname":"Alvarez","lastname":"Mack","age":22,"gender":"F","address":"897 Manor Court","employer":"Snorus","email":"alvarezmack@snorus.com","city":"Rosedale","state":"CA"} +{"index":{"_id":"161"}} +{"account_number":161,"balance":4659,"firstname":"Doreen","lastname":"Randall","age":37,"gender":"F","address":"178 Court Street","employer":"Calcula","email":"doreenrandall@calcula.com","city":"Belmont","state":"TX"} +{"index":{"_id":"166"}} +{"account_number":166,"balance":33847,"firstname":"Rutledge","lastname":"Rivas","age":23,"gender":"M","address":"352 Verona Street","employer":"Virxo","email":"rutledgerivas@virxo.com","city":"Brandermill","state":"NE"} +{"index":{"_id":"173"}} +{"account_number":173,"balance":5989,"firstname":"Whitley","lastname":"Blevins","age":32,"gender":"M","address":"127 Brooklyn Avenue","employer":"Pawnagra","email":"whitleyblevins@pawnagra.com","city":"Rodanthe","state":"ND"} +{"index":{"_id":"178"}} +{"account_number":178,"balance":36735,"firstname":"Clements","lastname":"Finley","age":39,"gender":"F","address":"270 Story Court","employer":"Imaginart","email":"clementsfinley@imaginart.com","city":"Lookingglass","state":"MN"} +{"index":{"_id":"180"}} +{"account_number":180,"balance":34236,"firstname":"Ursula","lastname":"Goodman","age":32,"gender":"F","address":"414 Clinton Street","employer":"Earthmark","email":"ursulagoodman@earthmark.com","city":"Rote","state":"AR"} +{"index":{"_id":"185"}} +{"account_number":185,"balance":43532,"firstname":"Laurel","lastname":"Cline","age":40,"gender":"M","address":"788 Fenimore Street","employer":"Prismatic","email":"laurelcline@prismatic.com","city":"Frank","state":"UT"} +{"index":{"_id":"192"}} +{"account_number":192,"balance":23508,"firstname":"Ramsey","lastname":"Carr","age":31,"gender":"F","address":"209 Williamsburg Street","employer":"Strezzo","email":"ramseycarr@strezzo.com","city":"Grapeview","state":"NM"} +{"index":{"_id":"197"}} +{"account_number":197,"balance":17246,"firstname":"Sweet","lastname":"Sanders","age":33,"gender":"F","address":"712 Homecrest Court","employer":"Isosure","email":"sweetsanders@isosure.com","city":"Sheatown","state":"VT"} +{"index":{"_id":"200"}} +{"account_number":200,"balance":26210,"firstname":"Teri","lastname":"Hester","age":39,"gender":"M","address":"653 
Abbey Court","employer":"Electonic","email":"terihester@electonic.com","city":"Martell","state":"MD"} +{"index":{"_id":"205"}} +{"account_number":205,"balance":45493,"firstname":"Johnson","lastname":"Chang","age":28,"gender":"F","address":"331 John Street","employer":"Gleamink","email":"johnsonchang@gleamink.com","city":"Sultana","state":"KS"} +{"index":{"_id":"212"}} +{"account_number":212,"balance":10299,"firstname":"Marisol","lastname":"Fischer","age":39,"gender":"M","address":"362 Prince Street","employer":"Autograte","email":"marisolfischer@autograte.com","city":"Oley","state":"SC"} +{"index":{"_id":"217"}} +{"account_number":217,"balance":33730,"firstname":"Sally","lastname":"Mccoy","age":38,"gender":"F","address":"854 Corbin Place","employer":"Omnigog","email":"sallymccoy@omnigog.com","city":"Escondida","state":"FL"} +{"index":{"_id":"224"}} +{"account_number":224,"balance":42708,"firstname":"Billie","lastname":"Nixon","age":28,"gender":"F","address":"241 Kaufman Place","employer":"Xanide","email":"billienixon@xanide.com","city":"Chapin","state":"NY"} +{"index":{"_id":"229"}} +{"account_number":229,"balance":2740,"firstname":"Jana","lastname":"Hensley","age":30,"gender":"M","address":"176 Erasmus Street","employer":"Isotrack","email":"janahensley@isotrack.com","city":"Caledonia","state":"ME"} +{"index":{"_id":"231"}} +{"account_number":231,"balance":46180,"firstname":"Essie","lastname":"Clarke","age":34,"gender":"F","address":"308 Harbor Lane","employer":"Pharmacon","email":"essieclarke@pharmacon.com","city":"Fillmore","state":"MS"} +{"index":{"_id":"236"}} +{"account_number":236,"balance":41200,"firstname":"Suzanne","lastname":"Bird","age":39,"gender":"F","address":"219 Luquer Street","employer":"Imant","email":"suzannebird@imant.com","city":"Bainbridge","state":"NY"} +{"index":{"_id":"243"}} +{"account_number":243,"balance":29902,"firstname":"Evangelina","lastname":"Perez","age":20,"gender":"M","address":"787 Joval Court","employer":"Keengen","email":"evangelinaperez@keengen.com","city":"Mulberry","state":"SD"} +{"index":{"_id":"248"}} +{"account_number":248,"balance":49989,"firstname":"West","lastname":"England","age":36,"gender":"M","address":"717 Hendrickson Place","employer":"Obliq","email":"westengland@obliq.com","city":"Maury","state":"WA"} +{"index":{"_id":"250"}} +{"account_number":250,"balance":27893,"firstname":"Earlene","lastname":"Ellis","age":39,"gender":"F","address":"512 Bay Street","employer":"Codact","email":"earleneellis@codact.com","city":"Sunwest","state":"GA"} +{"index":{"_id":"255"}} +{"account_number":255,"balance":49339,"firstname":"Iva","lastname":"Rivers","age":38,"gender":"M","address":"470 Rost Place","employer":"Mantrix","email":"ivarivers@mantrix.com","city":"Disautel","state":"MD"} +{"index":{"_id":"262"}} +{"account_number":262,"balance":30289,"firstname":"Tameka","lastname":"Levine","age":36,"gender":"F","address":"815 Atlantic Avenue","employer":"Acium","email":"tamekalevine@acium.com","city":"Winchester","state":"SD"} +{"index":{"_id":"267"}} +{"account_number":267,"balance":42753,"firstname":"Weeks","lastname":"Castillo","age":21,"gender":"F","address":"526 Holt Court","employer":"Talendula","email":"weekscastillo@talendula.com","city":"Washington","state":"NV"} +{"index":{"_id":"274"}} +{"account_number":274,"balance":12104,"firstname":"Frieda","lastname":"House","age":33,"gender":"F","address":"171 Banker Street","employer":"Quonk","email":"friedahouse@quonk.com","city":"Aberdeen","state":"NJ"} +{"index":{"_id":"279"}} 
+{"account_number":279,"balance":15904,"firstname":"Chapman","lastname":"Hart","age":32,"gender":"F","address":"902 Bliss Terrace","employer":"Kongene","email":"chapmanhart@kongene.com","city":"Bradenville","state":"NJ"} +{"index":{"_id":"281"}} +{"account_number":281,"balance":39830,"firstname":"Bean","lastname":"Aguirre","age":20,"gender":"F","address":"133 Pilling Street","employer":"Amril","email":"beanaguirre@amril.com","city":"Waterview","state":"TX"} +{"index":{"_id":"286"}} +{"account_number":286,"balance":39063,"firstname":"Rosetta","lastname":"Turner","age":35,"gender":"M","address":"169 Jefferson Avenue","employer":"Spacewax","email":"rosettaturner@spacewax.com","city":"Stewart","state":"MO"} +{"index":{"_id":"293"}} +{"account_number":293,"balance":29867,"firstname":"Cruz","lastname":"Carver","age":28,"gender":"F","address":"465 Boerum Place","employer":"Vitricomp","email":"cruzcarver@vitricomp.com","city":"Crayne","state":"CO"} +{"index":{"_id":"298"}} +{"account_number":298,"balance":34334,"firstname":"Bullock","lastname":"Marsh","age":20,"gender":"M","address":"589 Virginia Place","employer":"Renovize","email":"bullockmarsh@renovize.com","city":"Coinjock","state":"UT"} +{"index":{"_id":"301"}} +{"account_number":301,"balance":16782,"firstname":"Minerva","lastname":"Graham","age":35,"gender":"M","address":"532 Harrison Place","employer":"Sureplex","email":"minervagraham@sureplex.com","city":"Belleview","state":"GA"} +{"index":{"_id":"306"}} +{"account_number":306,"balance":2171,"firstname":"Hensley","lastname":"Hardin","age":40,"gender":"M","address":"196 Maujer Street","employer":"Neocent","email":"hensleyhardin@neocent.com","city":"Reinerton","state":"HI"} +{"index":{"_id":"313"}} +{"account_number":313,"balance":34108,"firstname":"Alston","lastname":"Henderson","age":36,"gender":"F","address":"132 Prescott Place","employer":"Prosure","email":"alstonhenderson@prosure.com","city":"Worton","state":"IA"} +{"index":{"_id":"318"}} +{"account_number":318,"balance":8512,"firstname":"Nichole","lastname":"Pearson","age":34,"gender":"F","address":"656 Lacon Court","employer":"Yurture","email":"nicholepearson@yurture.com","city":"Juarez","state":"MO"} +{"index":{"_id":"320"}} +{"account_number":320,"balance":34521,"firstname":"Patti","lastname":"Brennan","age":37,"gender":"F","address":"870 Degraw Street","employer":"Cognicode","email":"pattibrennan@cognicode.com","city":"Torboy","state":"FL"} +{"index":{"_id":"325"}} +{"account_number":325,"balance":1956,"firstname":"Magdalena","lastname":"Simmons","age":25,"gender":"F","address":"681 Townsend Street","employer":"Geekosis","email":"magdalenasimmons@geekosis.com","city":"Sterling","state":"CA"} +{"index":{"_id":"332"}} +{"account_number":332,"balance":37770,"firstname":"Shepherd","lastname":"Davenport","age":28,"gender":"F","address":"586 Montague Terrace","employer":"Ecraze","email":"shepherddavenport@ecraze.com","city":"Accoville","state":"NM"} +{"index":{"_id":"337"}} +{"account_number":337,"balance":43432,"firstname":"Monroe","lastname":"Stafford","age":37,"gender":"F","address":"183 Seigel Street","employer":"Centuria","email":"monroestafford@centuria.com","city":"Camino","state":"DE"} +{"index":{"_id":"344"}} +{"account_number":344,"balance":42654,"firstname":"Sasha","lastname":"Baxter","age":35,"gender":"F","address":"700 Bedford Place","employer":"Callflex","email":"sashabaxter@callflex.com","city":"Campo","state":"MI"} +{"index":{"_id":"349"}} 
+{"account_number":349,"balance":24180,"firstname":"Allison","lastname":"Fitzpatrick","age":22,"gender":"F","address":"913 Arlington Avenue","employer":"Veraq","email":"allisonfitzpatrick@veraq.com","city":"Marbury","state":"TX"} +{"index":{"_id":"351"}} +{"account_number":351,"balance":47089,"firstname":"Hendrix","lastname":"Stephens","age":29,"gender":"M","address":"181 Beaver Street","employer":"Recrisys","email":"hendrixstephens@recrisys.com","city":"Denio","state":"OR"} +{"index":{"_id":"356"}} +{"account_number":356,"balance":34540,"firstname":"Lourdes","lastname":"Valdez","age":20,"gender":"F","address":"700 Anchorage Place","employer":"Interloo","email":"lourdesvaldez@interloo.com","city":"Goldfield","state":"OK"} +{"index":{"_id":"363"}} +{"account_number":363,"balance":34007,"firstname":"Peggy","lastname":"Bright","age":21,"gender":"M","address":"613 Engert Avenue","employer":"Inventure","email":"peggybright@inventure.com","city":"Chautauqua","state":"ME"} +{"index":{"_id":"368"}} +{"account_number":368,"balance":23535,"firstname":"Hooper","lastname":"Tyson","age":39,"gender":"M","address":"892 Taaffe Place","employer":"Zaggle","email":"hoopertyson@zaggle.com","city":"Nutrioso","state":"ME"} +{"index":{"_id":"370"}} +{"account_number":370,"balance":28499,"firstname":"Oneill","lastname":"Carney","age":25,"gender":"F","address":"773 Adelphi Street","employer":"Bedder","email":"oneillcarney@bedder.com","city":"Yorklyn","state":"FL"} +{"index":{"_id":"375"}} +{"account_number":375,"balance":23860,"firstname":"Phoebe","lastname":"Patton","age":25,"gender":"M","address":"564 Hale Avenue","employer":"Xoggle","email":"phoebepatton@xoggle.com","city":"Brule","state":"NM"} +{"index":{"_id":"382"}} +{"account_number":382,"balance":42061,"firstname":"Finley","lastname":"Singleton","age":37,"gender":"F","address":"407 Clay Street","employer":"Quarex","email":"finleysingleton@quarex.com","city":"Bedias","state":"LA"} +{"index":{"_id":"387"}} +{"account_number":387,"balance":35916,"firstname":"April","lastname":"Hill","age":29,"gender":"M","address":"818 Bayard Street","employer":"Kengen","email":"aprilhill@kengen.com","city":"Chloride","state":"NC"} +{"index":{"_id":"394"}} +{"account_number":394,"balance":6121,"firstname":"Lorrie","lastname":"Nunez","age":38,"gender":"M","address":"221 Ralph Avenue","employer":"Bullzone","email":"lorrienunez@bullzone.com","city":"Longoria","state":"ID"} +{"index":{"_id":"399"}} +{"account_number":399,"balance":32587,"firstname":"Carmela","lastname":"Franks","age":23,"gender":"M","address":"617 Dewey Place","employer":"Zensure","email":"carmelafranks@zensure.com","city":"Sanders","state":"DC"} +{"index":{"_id":"402"}} +{"account_number":402,"balance":1282,"firstname":"Pacheco","lastname":"Rosales","age":32,"gender":"M","address":"538 Pershing Loop","employer":"Circum","email":"pachecorosales@circum.com","city":"Elbert","state":"ID"} +{"index":{"_id":"407"}} +{"account_number":407,"balance":36417,"firstname":"Gilda","lastname":"Jacobson","age":29,"gender":"F","address":"883 Loring Avenue","employer":"Comveyor","email":"gildajacobson@comveyor.com","city":"Topaz","state":"NH"} +{"index":{"_id":"414"}} +{"account_number":414,"balance":17506,"firstname":"Conway","lastname":"Daugherty","age":37,"gender":"F","address":"643 Kermit Place","employer":"Lyria","email":"conwaydaugherty@lyria.com","city":"Vaughn","state":"NV"} +{"index":{"_id":"419"}} +{"account_number":419,"balance":34847,"firstname":"Helen","lastname":"Montoya","age":29,"gender":"F","address":"736 
Kingsland Avenue","employer":"Hairport","email":"helenmontoya@hairport.com","city":"Edinburg","state":"NE"} +{"index":{"_id":"421"}} +{"account_number":421,"balance":46868,"firstname":"Tamika","lastname":"Mccall","age":27,"gender":"F","address":"764 Bragg Court","employer":"Eventix","email":"tamikamccall@eventix.com","city":"Tivoli","state":"RI"} +{"index":{"_id":"426"}} +{"account_number":426,"balance":4499,"firstname":"Julie","lastname":"Parsons","age":31,"gender":"M","address":"768 Keap Street","employer":"Goko","email":"julieparsons@goko.com","city":"Coldiron","state":"VA"} +{"index":{"_id":"433"}} +{"account_number":433,"balance":19266,"firstname":"Wilkinson","lastname":"Flowers","age":39,"gender":"M","address":"154 Douglass Street","employer":"Xsports","email":"wilkinsonflowers@xsports.com","city":"Coultervillle","state":"MN"} +{"index":{"_id":"438"}} +{"account_number":438,"balance":16367,"firstname":"Walter","lastname":"Velez","age":27,"gender":"F","address":"931 Farragut Road","employer":"Virva","email":"waltervelez@virva.com","city":"Tyro","state":"WV"} +{"index":{"_id":"440"}} +{"account_number":440,"balance":41590,"firstname":"Ray","lastname":"Wiley","age":31,"gender":"F","address":"102 Barwell Terrace","employer":"Polaria","email":"raywiley@polaria.com","city":"Hardyville","state":"IA"} +{"index":{"_id":"445"}} +{"account_number":445,"balance":41178,"firstname":"Rodriguez","lastname":"Macias","age":34,"gender":"M","address":"164 Boerum Street","employer":"Xylar","email":"rodriguezmacias@xylar.com","city":"Riner","state":"AL"} +{"index":{"_id":"452"}} +{"account_number":452,"balance":3589,"firstname":"Blackwell","lastname":"Delaney","age":39,"gender":"F","address":"443 Sackett Street","employer":"Imkan","email":"blackwelldelaney@imkan.com","city":"Gasquet","state":"DC"} +{"index":{"_id":"457"}} +{"account_number":457,"balance":14057,"firstname":"Bush","lastname":"Gordon","age":34,"gender":"M","address":"975 Dakota Place","employer":"Softmicro","email":"bushgordon@softmicro.com","city":"Chemung","state":"PA"} +{"index":{"_id":"464"}} +{"account_number":464,"balance":20504,"firstname":"Cobb","lastname":"Humphrey","age":21,"gender":"M","address":"823 Sunnyside Avenue","employer":"Apexia","email":"cobbhumphrey@apexia.com","city":"Wintersburg","state":"NY"} +{"index":{"_id":"469"}} +{"account_number":469,"balance":26509,"firstname":"Marci","lastname":"Shepherd","age":26,"gender":"M","address":"565 Hall Street","employer":"Shadease","email":"marcishepherd@shadease.com","city":"Springhill","state":"IL"} +{"index":{"_id":"471"}} +{"account_number":471,"balance":7629,"firstname":"Juana","lastname":"Silva","age":36,"gender":"M","address":"249 Amity Street","employer":"Artworlds","email":"juanasilva@artworlds.com","city":"Norfolk","state":"TX"} +{"index":{"_id":"476"}} +{"account_number":476,"balance":33386,"firstname":"Silva","lastname":"Marks","age":31,"gender":"F","address":"183 Eldert Street","employer":"Medifax","email":"silvamarks@medifax.com","city":"Hachita","state":"RI"} +{"index":{"_id":"483"}} +{"account_number":483,"balance":6344,"firstname":"Kelley","lastname":"Harper","age":29,"gender":"M","address":"758 Preston Court","employer":"Xyqag","email":"kelleyharper@xyqag.com","city":"Healy","state":"IA"} +{"index":{"_id":"488"}} +{"account_number":488,"balance":6289,"firstname":"Wilma","lastname":"Hopkins","age":38,"gender":"M","address":"428 Lee Avenue","employer":"Entality","email":"wilmahopkins@entality.com","city":"Englevale","state":"WI"} +{"index":{"_id":"490"}} 
+{"account_number":490,"balance":1447,"firstname":"Strong","lastname":"Hendrix","age":26,"gender":"F","address":"134 Beach Place","employer":"Duoflex","email":"stronghendrix@duoflex.com","city":"Allentown","state":"ND"} +{"index":{"_id":"495"}} +{"account_number":495,"balance":13478,"firstname":"Abigail","lastname":"Nichols","age":40,"gender":"F","address":"887 President Street","employer":"Enquility","email":"abigailnichols@enquility.com","city":"Bagtown","state":"NM"} +{"index":{"_id":"503"}} +{"account_number":503,"balance":42649,"firstname":"Leta","lastname":"Stout","age":39,"gender":"F","address":"518 Bowery Street","employer":"Pivitol","email":"letastout@pivitol.com","city":"Boonville","state":"ND"} +{"index":{"_id":"508"}} +{"account_number":508,"balance":41300,"firstname":"Lawrence","lastname":"Mathews","age":27,"gender":"F","address":"987 Rose Street","employer":"Deviltoe","email":"lawrencemathews@deviltoe.com","city":"Woodburn","state":"FL"} +{"index":{"_id":"510"}} +{"account_number":510,"balance":48504,"firstname":"Petty","lastname":"Sykes","age":28,"gender":"M","address":"566 Village Road","employer":"Nebulean","email":"pettysykes@nebulean.com","city":"Wedgewood","state":"MO"} +{"index":{"_id":"515"}} +{"account_number":515,"balance":18531,"firstname":"Lott","lastname":"Keller","age":27,"gender":"M","address":"827 Miami Court","employer":"Translink","email":"lottkeller@translink.com","city":"Gila","state":"TX"} +{"index":{"_id":"522"}} +{"account_number":522,"balance":19879,"firstname":"Faulkner","lastname":"Garrett","age":29,"gender":"F","address":"396 Grove Place","employer":"Pigzart","email":"faulknergarrett@pigzart.com","city":"Felt","state":"AR"} +{"index":{"_id":"527"}} +{"account_number":527,"balance":2028,"firstname":"Carver","lastname":"Peters","age":35,"gender":"M","address":"816 Victor Road","employer":"Housedown","email":"carverpeters@housedown.com","city":"Nadine","state":"MD"} +{"index":{"_id":"534"}} +{"account_number":534,"balance":20470,"firstname":"Cristina","lastname":"Russo","age":25,"gender":"F","address":"500 Highlawn Avenue","employer":"Cyclonica","email":"cristinarusso@cyclonica.com","city":"Gorst","state":"KS"} +{"index":{"_id":"539"}} +{"account_number":539,"balance":24560,"firstname":"Tami","lastname":"Maddox","age":23,"gender":"F","address":"741 Pineapple Street","employer":"Accidency","email":"tamimaddox@accidency.com","city":"Kennedyville","state":"OH"} +{"index":{"_id":"541"}} +{"account_number":541,"balance":42915,"firstname":"Logan","lastname":"Burke","age":32,"gender":"M","address":"904 Clarendon Road","employer":"Overplex","email":"loganburke@overplex.com","city":"Johnsonburg","state":"OH"} +{"index":{"_id":"546"}} +{"account_number":546,"balance":43242,"firstname":"Bernice","lastname":"Sims","age":33,"gender":"M","address":"382 Columbia Street","employer":"Verbus","email":"bernicesims@verbus.com","city":"Sena","state":"KY"} +{"index":{"_id":"553"}} +{"account_number":553,"balance":28390,"firstname":"Aimee","lastname":"Cohen","age":28,"gender":"M","address":"396 Lafayette Avenue","employer":"Eplode","email":"aimeecohen@eplode.com","city":"Thatcher","state":"NJ"} +{"index":{"_id":"558"}} +{"account_number":558,"balance":8922,"firstname":"Horne","lastname":"Valenzuela","age":20,"gender":"F","address":"979 Kensington Street","employer":"Isoternia","email":"hornevalenzuela@isoternia.com","city":"Greenbush","state":"NC"} +{"index":{"_id":"560"}} 
+{"account_number":560,"balance":24514,"firstname":"Felecia","lastname":"Oneill","age":26,"gender":"M","address":"995 Autumn Avenue","employer":"Mediot","email":"feleciaoneill@mediot.com","city":"Joppa","state":"IN"} +{"index":{"_id":"565"}} +{"account_number":565,"balance":15197,"firstname":"Taylor","lastname":"Ingram","age":37,"gender":"F","address":"113 Will Place","employer":"Lyrichord","email":"tayloringram@lyrichord.com","city":"Collins","state":"ME"} +{"index":{"_id":"572"}} +{"account_number":572,"balance":49355,"firstname":"Therese","lastname":"Espinoza","age":20,"gender":"M","address":"994 Chester Court","employer":"Gonkle","email":"thereseespinoza@gonkle.com","city":"Hayes","state":"UT"} +{"index":{"_id":"577"}} +{"account_number":577,"balance":21398,"firstname":"Gilbert","lastname":"Serrano","age":38,"gender":"F","address":"294 Troutman Street","employer":"Senmao","email":"gilbertserrano@senmao.com","city":"Greer","state":"MT"} +{"index":{"_id":"584"}} +{"account_number":584,"balance":5346,"firstname":"Pearson","lastname":"Bryant","age":40,"gender":"F","address":"971 Heyward Street","employer":"Anacho","email":"pearsonbryant@anacho.com","city":"Bluffview","state":"MN"} +{"index":{"_id":"589"}} +{"account_number":589,"balance":33260,"firstname":"Ericka","lastname":"Cote","age":39,"gender":"F","address":"425 Bath Avenue","employer":"Venoflex","email":"erickacote@venoflex.com","city":"Blue","state":"CT"} +{"index":{"_id":"591"}} +{"account_number":591,"balance":48997,"firstname":"Rivers","lastname":"Macdonald","age":34,"gender":"F","address":"919 Johnson Street","employer":"Ziore","email":"riversmacdonald@ziore.com","city":"Townsend","state":"IL"} +{"index":{"_id":"596"}} +{"account_number":596,"balance":4063,"firstname":"Letitia","lastname":"Walker","age":26,"gender":"F","address":"963 Vanderveer Place","employer":"Zizzle","email":"letitiawalker@zizzle.com","city":"Rossmore","state":"ID"} +{"index":{"_id":"604"}} +{"account_number":604,"balance":10675,"firstname":"Isabel","lastname":"Gilliam","age":23,"gender":"M","address":"854 Broadway ","employer":"Zenthall","email":"isabelgilliam@zenthall.com","city":"Ventress","state":"WI"} +{"index":{"_id":"609"}} +{"account_number":609,"balance":28586,"firstname":"Montgomery","lastname":"Washington","age":30,"gender":"M","address":"169 Schroeders Avenue","employer":"Kongle","email":"montgomerywashington@kongle.com","city":"Croom","state":"AZ"} +{"index":{"_id":"611"}} +{"account_number":611,"balance":17528,"firstname":"Katherine","lastname":"Prince","age":33,"gender":"F","address":"705 Elm Avenue","employer":"Zillacon","email":"katherineprince@zillacon.com","city":"Rew","state":"MI"} +{"index":{"_id":"616"}} +{"account_number":616,"balance":25276,"firstname":"Jessie","lastname":"Mayer","age":35,"gender":"F","address":"683 Chester Avenue","employer":"Emtrak","email":"jessiemayer@emtrak.com","city":"Marysville","state":"HI"} +{"index":{"_id":"623"}} +{"account_number":623,"balance":20514,"firstname":"Rose","lastname":"Combs","age":32,"gender":"F","address":"312 Grimes Road","employer":"Aquamate","email":"rosecombs@aquamate.com","city":"Fostoria","state":"OH"} +{"index":{"_id":"628"}} +{"account_number":628,"balance":42736,"firstname":"Buckner","lastname":"Chen","age":37,"gender":"M","address":"863 Rugby Road","employer":"Jamnation","email":"bucknerchen@jamnation.com","city":"Camas","state":"TX"} +{"index":{"_id":"630"}} +{"account_number":630,"balance":46060,"firstname":"Leanne","lastname":"Jones","age":31,"gender":"M","address":"451 Bayview 
Avenue","employer":"Wazzu","email":"leannejones@wazzu.com","city":"Kylertown","state":"OK"} +{"index":{"_id":"635"}} +{"account_number":635,"balance":44705,"firstname":"Norman","lastname":"Gilmore","age":33,"gender":"M","address":"330 Gates Avenue","employer":"Comfirm","email":"normangilmore@comfirm.com","city":"Riceville","state":"TN"} +{"index":{"_id":"642"}} +{"account_number":642,"balance":32852,"firstname":"Reyna","lastname":"Harris","age":35,"gender":"M","address":"305 Powell Street","employer":"Bedlam","email":"reynaharris@bedlam.com","city":"Florence","state":"KS"} +{"index":{"_id":"647"}} +{"account_number":647,"balance":10147,"firstname":"Annabelle","lastname":"Velazquez","age":30,"gender":"M","address":"299 Kensington Walk","employer":"Sealoud","email":"annabellevelazquez@sealoud.com","city":"Soudan","state":"ME"} +{"index":{"_id":"654"}} +{"account_number":654,"balance":38695,"firstname":"Armstrong","lastname":"Frazier","age":25,"gender":"M","address":"899 Seeley Street","employer":"Zensor","email":"armstrongfrazier@zensor.com","city":"Cherokee","state":"UT"} +{"index":{"_id":"659"}} +{"account_number":659,"balance":29648,"firstname":"Dorsey","lastname":"Sosa","age":40,"gender":"M","address":"270 Aberdeen Street","employer":"Daycore","email":"dorseysosa@daycore.com","city":"Chamberino","state":"SC"} +{"index":{"_id":"661"}} +{"account_number":661,"balance":3679,"firstname":"Joanne","lastname":"Spencer","age":39,"gender":"F","address":"910 Montauk Avenue","employer":"Visalia","email":"joannespencer@visalia.com","city":"Valmy","state":"NH"} +{"index":{"_id":"666"}} +{"account_number":666,"balance":13880,"firstname":"Mcguire","lastname":"Lloyd","age":40,"gender":"F","address":"658 Just Court","employer":"Centrexin","email":"mcguirelloyd@centrexin.com","city":"Warren","state":"MT"} +{"index":{"_id":"673"}} +{"account_number":673,"balance":11303,"firstname":"Mcdaniel","lastname":"Harrell","age":33,"gender":"M","address":"565 Montgomery Place","employer":"Eyeris","email":"mcdanielharrell@eyeris.com","city":"Garnet","state":"NV"} +{"index":{"_id":"678"}} +{"account_number":678,"balance":43663,"firstname":"Ruby","lastname":"Shaffer","age":28,"gender":"M","address":"350 Clark Street","employer":"Comtrail","email":"rubyshaffer@comtrail.com","city":"Aurora","state":"MA"} +{"index":{"_id":"680"}} +{"account_number":680,"balance":31561,"firstname":"Melton","lastname":"Camacho","age":32,"gender":"F","address":"771 Montana Place","employer":"Insuresys","email":"meltoncamacho@insuresys.com","city":"Sparkill","state":"IN"} +{"index":{"_id":"685"}} +{"account_number":685,"balance":22249,"firstname":"Yesenia","lastname":"Rowland","age":24,"gender":"F","address":"193 Dekalb Avenue","employer":"Coriander","email":"yeseniarowland@coriander.com","city":"Lupton","state":"NC"} +{"index":{"_id":"692"}} +{"account_number":692,"balance":10435,"firstname":"Haney","lastname":"Barlow","age":21,"gender":"F","address":"267 Lenox Road","employer":"Egypto","email":"haneybarlow@egypto.com","city":"Detroit","state":"IN"} +{"index":{"_id":"697"}} +{"account_number":697,"balance":48745,"firstname":"Mallory","lastname":"Emerson","age":24,"gender":"F","address":"318 Dunne Court","employer":"Exoplode","email":"malloryemerson@exoplode.com","city":"Montura","state":"LA"} +{"index":{"_id":"700"}} +{"account_number":700,"balance":19164,"firstname":"Patel","lastname":"Durham","age":21,"gender":"F","address":"440 King Street","employer":"Icology","email":"pateldurham@icology.com","city":"Mammoth","state":"IL"} 
+{"index":{"_id":"705"}} +{"account_number":705,"balance":28415,"firstname":"Krystal","lastname":"Cross","age":22,"gender":"M","address":"604 Drew Street","employer":"Tubesys","email":"krystalcross@tubesys.com","city":"Dalton","state":"MO"} +{"index":{"_id":"712"}} +{"account_number":712,"balance":12459,"firstname":"Butler","lastname":"Alston","age":37,"gender":"M","address":"486 Hemlock Street","employer":"Quordate","email":"butleralston@quordate.com","city":"Verdi","state":"MS"} +{"index":{"_id":"717"}} +{"account_number":717,"balance":29270,"firstname":"Erickson","lastname":"Mcdonald","age":31,"gender":"M","address":"873 Franklin Street","employer":"Exotechno","email":"ericksonmcdonald@exotechno.com","city":"Jessie","state":"MS"} +{"index":{"_id":"724"}} +{"account_number":724,"balance":12548,"firstname":"Hopper","lastname":"Peck","age":31,"gender":"M","address":"849 Hendrickson Street","employer":"Uxmox","email":"hopperpeck@uxmox.com","city":"Faxon","state":"UT"} +{"index":{"_id":"729"}} +{"account_number":729,"balance":41812,"firstname":"Katy","lastname":"Rivera","age":36,"gender":"F","address":"791 Olive Street","employer":"Blurrybus","email":"katyrivera@blurrybus.com","city":"Innsbrook","state":"MI"} +{"index":{"_id":"731"}} +{"account_number":731,"balance":4994,"firstname":"Lorene","lastname":"Weiss","age":35,"gender":"M","address":"990 Ocean Court","employer":"Comvoy","email":"loreneweiss@comvoy.com","city":"Lavalette","state":"WI"} +{"index":{"_id":"736"}} +{"account_number":736,"balance":28677,"firstname":"Rogers","lastname":"Mcmahon","age":21,"gender":"F","address":"423 Cameron Court","employer":"Brainclip","email":"rogersmcmahon@brainclip.com","city":"Saddlebrooke","state":"FL"} +{"index":{"_id":"743"}} +{"account_number":743,"balance":14077,"firstname":"Susana","lastname":"Moody","age":23,"gender":"M","address":"842 Fountain Avenue","employer":"Bitrex","email":"susanamoody@bitrex.com","city":"Temperanceville","state":"TN"} +{"index":{"_id":"748"}} +{"account_number":748,"balance":38060,"firstname":"Ford","lastname":"Branch","age":25,"gender":"M","address":"926 Cypress Avenue","employer":"Buzzness","email":"fordbranch@buzzness.com","city":"Beason","state":"DC"} +{"index":{"_id":"750"}} +{"account_number":750,"balance":40481,"firstname":"Cherie","lastname":"Brooks","age":20,"gender":"F","address":"601 Woodhull Street","employer":"Kaggle","email":"cheriebrooks@kaggle.com","city":"Groton","state":"MA"} +{"index":{"_id":"755"}} +{"account_number":755,"balance":43878,"firstname":"Bartlett","lastname":"Conway","age":22,"gender":"M","address":"453 Times Placez","employer":"Konnect","email":"bartlettconway@konnect.com","city":"Belva","state":"VT"} +{"index":{"_id":"762"}} +{"account_number":762,"balance":10291,"firstname":"Amanda","lastname":"Head","age":20,"gender":"F","address":"990 Ocean Parkway","employer":"Zentury","email":"amandahead@zentury.com","city":"Hegins","state":"AR"} +{"index":{"_id":"767"}} +{"account_number":767,"balance":26220,"firstname":"Anthony","lastname":"Sutton","age":27,"gender":"F","address":"179 Fayette Street","employer":"Xiix","email":"anthonysutton@xiix.com","city":"Iberia","state":"TN"} +{"index":{"_id":"774"}} +{"account_number":774,"balance":35287,"firstname":"Lynnette","lastname":"Alvarez","age":38,"gender":"F","address":"991 Brightwater Avenue","employer":"Gink","email":"lynnettealvarez@gink.com","city":"Leola","state":"NC"} +{"index":{"_id":"779"}} 
+{"account_number":779,"balance":40983,"firstname":"Maggie","lastname":"Pace","age":32,"gender":"F","address":"104 Harbor Court","employer":"Bulljuice","email":"maggiepace@bulljuice.com","city":"Floris","state":"MA"} +{"index":{"_id":"781"}} +{"account_number":781,"balance":29961,"firstname":"Sanford","lastname":"Mullen","age":26,"gender":"F","address":"879 Dover Street","employer":"Zanity","email":"sanfordmullen@zanity.com","city":"Martinez","state":"TX"} +{"index":{"_id":"786"}} +{"account_number":786,"balance":3024,"firstname":"Rene","lastname":"Vang","age":33,"gender":"M","address":"506 Randolph Street","employer":"Isopop","email":"renevang@isopop.com","city":"Vienna","state":"NJ"} +{"index":{"_id":"793"}} +{"account_number":793,"balance":16911,"firstname":"Alford","lastname":"Compton","age":36,"gender":"M","address":"186 Veronica Place","employer":"Zyple","email":"alfordcompton@zyple.com","city":"Sugartown","state":"AK"} +{"index":{"_id":"798"}} +{"account_number":798,"balance":3165,"firstname":"Catherine","lastname":"Ward","age":30,"gender":"F","address":"325 Burnett Street","employer":"Dreamia","email":"catherineward@dreamia.com","city":"Glenbrook","state":"SD"} +{"index":{"_id":"801"}} +{"account_number":801,"balance":14954,"firstname":"Molly","lastname":"Maldonado","age":37,"gender":"M","address":"518 Maple Avenue","employer":"Straloy","email":"mollymaldonado@straloy.com","city":"Hebron","state":"WI"} +{"index":{"_id":"806"}} +{"account_number":806,"balance":36492,"firstname":"Carson","lastname":"Riddle","age":31,"gender":"M","address":"984 Lois Avenue","employer":"Terrago","email":"carsonriddle@terrago.com","city":"Leland","state":"MN"} +{"index":{"_id":"813"}} +{"account_number":813,"balance":30833,"firstname":"Ebony","lastname":"Bishop","age":20,"gender":"M","address":"487 Ridge Court","employer":"Optique","email":"ebonybishop@optique.com","city":"Fairmount","state":"WA"} +{"index":{"_id":"818"}} +{"account_number":818,"balance":24433,"firstname":"Espinoza","lastname":"Petersen","age":26,"gender":"M","address":"641 Glenwood Road","employer":"Futurity","email":"espinozapetersen@futurity.com","city":"Floriston","state":"MD"} +{"index":{"_id":"820"}} +{"account_number":820,"balance":1011,"firstname":"Shepard","lastname":"Ramsey","age":24,"gender":"F","address":"806 Village Court","employer":"Mantro","email":"shepardramsey@mantro.com","city":"Tibbie","state":"NV"} +{"index":{"_id":"825"}} +{"account_number":825,"balance":49000,"firstname":"Terra","lastname":"Witt","age":21,"gender":"F","address":"590 Conway Street","employer":"Insectus","email":"terrawitt@insectus.com","city":"Forbestown","state":"AR"} +{"index":{"_id":"832"}} +{"account_number":832,"balance":8582,"firstname":"Laura","lastname":"Gibbs","age":39,"gender":"F","address":"511 Osborn Street","employer":"Corepan","email":"lauragibbs@corepan.com","city":"Worcester","state":"KS"} +{"index":{"_id":"837"}} +{"account_number":837,"balance":14485,"firstname":"Amy","lastname":"Villarreal","age":35,"gender":"M","address":"381 Stillwell Place","employer":"Fleetmix","email":"amyvillarreal@fleetmix.com","city":"Sanford","state":"IA"} +{"index":{"_id":"844"}} +{"account_number":844,"balance":26840,"firstname":"Jill","lastname":"David","age":31,"gender":"M","address":"346 Legion Street","employer":"Zytrax","email":"jilldavid@zytrax.com","city":"Saticoy","state":"SC"} +{"index":{"_id":"849"}} +{"account_number":849,"balance":16200,"firstname":"Barry","lastname":"Chapman","age":26,"gender":"M","address":"931 Dekoven 
Court","employer":"Darwinium","email":"barrychapman@darwinium.com","city":"Whitestone","state":"WY"} +{"index":{"_id":"851"}} +{"account_number":851,"balance":22026,"firstname":"Henderson","lastname":"Price","age":33,"gender":"F","address":"530 Hausman Street","employer":"Plutorque","email":"hendersonprice@plutorque.com","city":"Brutus","state":"RI"} +{"index":{"_id":"856"}} +{"account_number":856,"balance":27583,"firstname":"Alissa","lastname":"Knox","age":25,"gender":"M","address":"258 Empire Boulevard","employer":"Geologix","email":"alissaknox@geologix.com","city":"Hartsville/Hartley","state":"MN"} +{"index":{"_id":"863"}} +{"account_number":863,"balance":23165,"firstname":"Melendez","lastname":"Fernandez","age":40,"gender":"M","address":"661 Johnson Avenue","employer":"Vixo","email":"melendezfernandez@vixo.com","city":"Farmers","state":"IL"} +{"index":{"_id":"868"}} +{"account_number":868,"balance":27624,"firstname":"Polly","lastname":"Barron","age":22,"gender":"M","address":"129 Frank Court","employer":"Geofarm","email":"pollybarron@geofarm.com","city":"Loyalhanna","state":"ND"} +{"index":{"_id":"870"}} +{"account_number":870,"balance":43882,"firstname":"Goff","lastname":"Phelps","age":21,"gender":"M","address":"164 Montague Street","employer":"Digigen","email":"goffphelps@digigen.com","city":"Weedville","state":"IL"} +{"index":{"_id":"875"}} +{"account_number":875,"balance":19655,"firstname":"Mercer","lastname":"Pratt","age":24,"gender":"M","address":"608 Perry Place","employer":"Twiggery","email":"mercerpratt@twiggery.com","city":"Eggertsville","state":"MO"} +{"index":{"_id":"882"}} +{"account_number":882,"balance":10895,"firstname":"Mari","lastname":"Landry","age":39,"gender":"M","address":"963 Gerald Court","employer":"Kenegy","email":"marilandry@kenegy.com","city":"Lithium","state":"NC"} +{"index":{"_id":"887"}} +{"account_number":887,"balance":31772,"firstname":"Eunice","lastname":"Watts","age":36,"gender":"F","address":"707 Stuyvesant Avenue","employer":"Memora","email":"eunicewatts@memora.com","city":"Westwood","state":"TN"} +{"index":{"_id":"894"}} +{"account_number":894,"balance":1031,"firstname":"Tyler","lastname":"Fitzgerald","age":32,"gender":"M","address":"787 Meserole Street","employer":"Jetsilk","email":"tylerfitzgerald@jetsilk.com","city":"Woodlands","state":"WV"} +{"index":{"_id":"899"}} +{"account_number":899,"balance":32953,"firstname":"Carney","lastname":"Callahan","age":23,"gender":"M","address":"724 Kimball Street","employer":"Mangelica","email":"carneycallahan@mangelica.com","city":"Tecolotito","state":"MT"} +{"index":{"_id":"902"}} +{"account_number":902,"balance":13345,"firstname":"Hallie","lastname":"Jarvis","age":23,"gender":"F","address":"237 Duryea Court","employer":"Anixang","email":"halliejarvis@anixang.com","city":"Boykin","state":"IN"} +{"index":{"_id":"907"}} +{"account_number":907,"balance":12961,"firstname":"Ingram","lastname":"William","age":36,"gender":"M","address":"826 Overbaugh Place","employer":"Genmex","email":"ingramwilliam@genmex.com","city":"Kimmell","state":"AK"} +{"index":{"_id":"914"}} +{"account_number":914,"balance":7120,"firstname":"Esther","lastname":"Bean","age":32,"gender":"F","address":"583 Macon Street","employer":"Applica","email":"estherbean@applica.com","city":"Homeworth","state":"MN"} +{"index":{"_id":"919"}} +{"account_number":919,"balance":39655,"firstname":"Shauna","lastname":"Hanson","age":27,"gender":"M","address":"557 Hart Place","employer":"Exospace","email":"shaunahanson@exospace.com","city":"Outlook","state":"LA"} 
+{"index":{"_id":"921"}} +{"account_number":921,"balance":49119,"firstname":"Barbara","lastname":"Wade","age":29,"gender":"M","address":"687 Hoyts Lane","employer":"Roughies","email":"barbarawade@roughies.com","city":"Sattley","state":"CO"} +{"index":{"_id":"926"}} +{"account_number":926,"balance":49433,"firstname":"Welch","lastname":"Mcgowan","age":21,"gender":"M","address":"833 Quincy Street","employer":"Atomica","email":"welchmcgowan@atomica.com","city":"Hampstead","state":"VT"} +{"index":{"_id":"933"}} +{"account_number":933,"balance":18071,"firstname":"Tabitha","lastname":"Cole","age":21,"gender":"F","address":"916 Rogers Avenue","employer":"Eclipto","email":"tabithacole@eclipto.com","city":"Lawrence","state":"TX"} +{"index":{"_id":"938"}} +{"account_number":938,"balance":9597,"firstname":"Sharron","lastname":"Santos","age":40,"gender":"F","address":"215 Matthews Place","employer":"Zenco","email":"sharronsantos@zenco.com","city":"Wattsville","state":"VT"} +{"index":{"_id":"940"}} +{"account_number":940,"balance":23285,"firstname":"Melinda","lastname":"Mendoza","age":38,"gender":"M","address":"806 Kossuth Place","employer":"Kneedles","email":"melindamendoza@kneedles.com","city":"Coaldale","state":"OK"} +{"index":{"_id":"945"}} +{"account_number":945,"balance":23085,"firstname":"Hansen","lastname":"Hebert","age":33,"gender":"F","address":"287 Conduit Boulevard","employer":"Capscreen","email":"hansenhebert@capscreen.com","city":"Taycheedah","state":"AK"} +{"index":{"_id":"952"}} +{"account_number":952,"balance":21430,"firstname":"Angelique","lastname":"Weeks","age":33,"gender":"M","address":"659 Reeve Place","employer":"Exodoc","email":"angeliqueweeks@exodoc.com","city":"Turpin","state":"MD"} +{"index":{"_id":"957"}} +{"account_number":957,"balance":11373,"firstname":"Michael","lastname":"Giles","age":31,"gender":"M","address":"668 Court Square","employer":"Yogasm","email":"michaelgiles@yogasm.com","city":"Rosburg","state":"WV"} +{"index":{"_id":"964"}} +{"account_number":964,"balance":26154,"firstname":"Elena","lastname":"Waller","age":34,"gender":"F","address":"618 Crystal Street","employer":"Insurety","email":"elenawaller@insurety.com","city":"Gallina","state":"NY"} +{"index":{"_id":"969"}} +{"account_number":969,"balance":22214,"firstname":"Briggs","lastname":"Lynn","age":30,"gender":"M","address":"952 Lester Court","employer":"Quinex","email":"briggslynn@quinex.com","city":"Roland","state":"ID"} +{"index":{"_id":"971"}} +{"account_number":971,"balance":22772,"firstname":"Gabrielle","lastname":"Reilly","age":32,"gender":"F","address":"964 Tudor Terrace","employer":"Blanet","email":"gabriellereilly@blanet.com","city":"Falmouth","state":"AL"} +{"index":{"_id":"976"}} +{"account_number":976,"balance":31707,"firstname":"Mullen","lastname":"Tanner","age":26,"gender":"M","address":"711 Whitney Avenue","employer":"Pulze","email":"mullentanner@pulze.com","city":"Mooresburg","state":"MA"} +{"index":{"_id":"983"}} +{"account_number":983,"balance":47205,"firstname":"Mattie","lastname":"Eaton","age":24,"gender":"F","address":"418 Allen Avenue","employer":"Trasola","email":"mattieeaton@trasola.com","city":"Dupuyer","state":"NJ"} +{"index":{"_id":"988"}} +{"account_number":988,"balance":17803,"firstname":"Lucy","lastname":"Castro","age":34,"gender":"F","address":"425 Fleet Walk","employer":"Geekfarm","email":"lucycastro@geekfarm.com","city":"Mulino","state":"VA"} +{"index":{"_id":"990"}} 
+{"account_number":990,"balance":44456,"firstname":"Kelly","lastname":"Steele","age":35,"gender":"M","address":"809 Hoyt Street","employer":"Eschoir","email":"kellysteele@eschoir.com","city":"Stewartville","state":"ID"} +{"index":{"_id":"995"}} +{"account_number":995,"balance":21153,"firstname":"Phelps","lastname":"Parrish","age":25,"gender":"M","address":"666 Miller Place","employer":"Pearlessa","email":"phelpsparrish@pearlessa.com","city":"Brecon","state":"ME"} diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/MatrixStatsAggregationBuilders.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/MatrixStatsAggregationBuilders.java index 10758979ed9..38c412738fc 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/MatrixStatsAggregationBuilders.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/MatrixStatsAggregationBuilders.java @@ -21,8 +21,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats; import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; -/** - */ public class MatrixStatsAggregationBuilders { /** * Create a new {@link MatrixStats} aggregation with the given name. diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index 26e7910dcac..2ad23417956 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -33,8 +33,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -/** - */ public class MatrixStatsAggregatorFactory extends MultiValuesSourceAggregatorFactory { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java index ea383b642c2..6e7eef863cb 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java @@ -31,8 +31,6 @@ import java.util.Map; import static org.elasticsearch.search.aggregations.support.MultiValuesSourceAggregationBuilder.MULTIVALUE_MODE_FIELD; -/** - */ public class MatrixStatsParser extends NumericValuesSourceParser { public MatrixStatsParser() { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index e3aa171fe3d..8524f7d79b4 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -44,9 +44,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -/** - * - 
*/ public abstract class MultiValuesSourceAggregationBuilder<VS extends ValuesSource, AB extends MultiValuesSourceAggregationBuilder<VS, AB>> extends AbstractAggregationBuilder<AB> { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java index 0de5e13c058..530c13ca2ce 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.script.Script.ScriptField; +import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregationBuilder.CommonFields; @@ -35,9 +35,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -/** - * - */ public abstract class MultiValuesSourceParser<VS extends ValuesSource> implements Aggregator.Parser { public abstract static class AnyValuesSourceParser extends MultiValuesSourceParser<ValuesSource> { @@ -114,7 +111,7 @@ public abstract class MultiValuesSourceParser<VS extends ValuesSource> implement while (parser.nextToken() != XContentParser.Token.END_OBJECT) { parseMissingAndAdd(aggregationName, currentFieldName, parser, missingMap); } - } else if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + } else if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + "Multi-field aggregations do not support scripts."); @@ -124,7 +121,7 @@ public abstract class MultiValuesSourceParser<VS extends ValuesSource> implement "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { - if (context.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) { + if (context.getParseFieldMatcher().match(currentFieldName, Script.SCRIPT_PARSE_FIELD)) { throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. 
" + "Multi-field aggregations do not support scripts."); diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java index 6fb70ad3d63..11ddd2dfd41 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java @@ -34,6 +34,6 @@ public class MatrixStatsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java index 2e4fa4313bd..81c9d514636 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java @@ -26,9 +26,6 @@ import java.util.HashMap; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public abstract class BaseMatrixStatsTestCase extends ESTestCase { protected final int numObs = atLeast(10000); protected final ArrayList fieldA = new ArrayList<>(numObs); diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java index f90f00d2a79..ca22e33e6bc 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java @@ -20,9 +20,6 @@ package org.elasticsearch.search.aggregations.matrix.stats; import java.util.List; -/** - * - */ public class RunningStatsTests extends BaseMatrixStatsTestCase { /** test running stats */ diff --git a/modules/build.gradle b/modules/build.gradle index d5b207625c1..b3dbde24936 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -25,6 +25,11 @@ subprojects { // for local ES plugins, the name of the plugin is the same as the directory name project.name } + + run { + // these cannot be run with the normal distribution, since they are included in it! 
+ distribution = 'integ-test-zip' + } if (project.file('src/main/packaging').exists()) { throw new InvalidModelException("Modules cannot contain packaging files") diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index 94f335cc12f..7828c5f67fb 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -36,9 +36,9 @@ import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationExcept import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalMap; import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalStringProperty; import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; -import static org.elasticsearch.script.ScriptService.ScriptType.FILE; -import static org.elasticsearch.script.ScriptService.ScriptType.INLINE; -import static org.elasticsearch.script.ScriptService.ScriptType.STORED; +import static org.elasticsearch.script.ScriptType.FILE; +import static org.elasticsearch.script.ScriptType.INLINE; +import static org.elasticsearch.script.ScriptType.STORED; /** * Processor that adds new fields with their corresponding values. If the field is already present, its value @@ -59,7 +59,7 @@ public final class ScriptProcessor extends AbstractProcessor { @Override public void execute(IngestDocument document) { - ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.INGEST, emptyMap()); + ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.INGEST); executableScript.setNextVar("ctx", document.getSourceAndMetadata()); executableScript.run(); } @@ -69,6 +69,10 @@ public final class ScriptProcessor extends AbstractProcessor { return TYPE; } + Script getScript() { + return script; + } + public static final class Factory implements Processor.Factory { private final ScriptService scriptService; @@ -78,9 +82,10 @@ public final class ScriptProcessor extends AbstractProcessor { } @Override + @SuppressWarnings("unchecked") public ScriptProcessor create(Map<String, Processor.Factory> registry, String processorTag, Map<String, Object> config) throws Exception { - String lang = readStringProperty(TYPE, processorTag, config, "lang"); + String lang = readOptionalStringProperty(TYPE, processorTag, config, "lang"); String inline = readOptionalStringProperty(TYPE, processorTag, config, "inline"); String file = readOptionalStringProperty(TYPE, processorTag, config, "file"); String id = readOptionalStringProperty(TYPE, processorTag, config, "id"); @@ -97,17 +102,21 @@ public final class ScriptProcessor extends AbstractProcessor { throw newConfigurationException(TYPE, processorTag, null, "Only one of [file], [id], or [inline] may be configured"); } - if(params == null) { + if (lang == null) { + lang = Script.DEFAULT_SCRIPT_LANG; + } + + if (params == null) { params = emptyMap(); } final Script script; if (Strings.hasLength(file)) { - script = new Script(file, FILE, lang, params); + script = new Script(FILE, lang, file, (Map<String, Object>)params); } else if (Strings.hasLength(inline)) { - script = new Script(inline, INLINE, lang, params); + script = new Script(INLINE, lang, inline, (Map<String, Object>)params); } else if (Strings.hasLength(id)) { - script = new Script(id, STORED, lang, params); + script = new Script(STORED, lang, id, (Map<String, Object>)params); } else { throw 
newConfigurationException(TYPE, processorTag, null, "Could not initialize script"); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java index 3c71f5710fd..1b678835c4b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class IngestCommonClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java index 27eeb80670a..938b5be7f76 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -29,18 +29,48 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; public class ScriptProcessorFactoryTests extends ESTestCase { private ScriptProcessor.Factory factory; + private static final Map<String, String> ingestScriptParamToType; + static { + Map<String, String> map = new HashMap<>(); + map.put("id", "stored"); + map.put("inline", "inline"); + map.put("file", "file"); + ingestScriptParamToType = Collections.unmodifiableMap(map); + } @Before public void init() { factory = new ScriptProcessor.Factory(mock(ScriptService.class)); } + public void testFactoryValidationWithDefaultLang() throws Exception { + Map<String, Object> configMap = new HashMap<>(); + String randomType = randomFrom("id", "inline", "file"); + configMap.put(randomType, "foo"); + ScriptProcessor processor = factory.create(null, randomAsciiOfLength(10), configMap); + assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); + assertThat(processor.getScript().getType().toString(), equalTo(ingestScriptParamToType.get(randomType))); + assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap())); + } + + public void testFactoryValidationWithParams() throws Exception { + Map<String, Object> configMap = new HashMap<>(); + String randomType = randomFrom("id", "inline", "file"); + Map<String, Object> randomParams = Collections.singletonMap(randomAsciiOfLength(10), randomAsciiOfLength(10)); + configMap.put(randomType, "foo"); + configMap.put("params", randomParams); + ScriptProcessor processor = factory.create(null, randomAsciiOfLength(10), configMap); + assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); + assertThat(processor.getScript().getType().toString(), equalTo(ingestScriptParamToType.get(randomType))); + 
assertThat(processor.getScript().getParams(), equalTo(randomParams)); + } public void testFactoryValidationForMultipleScriptingTypes() throws Exception { Map<String, Object> configMap = new HashMap<>(); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java index c32b0f101a0..d59da982d2e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java @@ -48,7 +48,7 @@ public class ScriptProcessorTests extends ESTestCase { ScriptService scriptService = mock(ScriptService.class); Script script = new Script("_script"); ExecutableScript executableScript = mock(ExecutableScript.class); - when(scriptService.executable(any(), any(), any())).thenReturn(executableScript); + when(scriptService.executable(any(Script.class), any())).thenReturn(executableScript); Map<String, Object> document = new HashMap<>(); document.put("bytes_in", randomInt()); diff --git a/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 deleted file mode 100644 index 205aaae6e66..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99764b20aba5443f8a181f7015a806443c589844 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..aadc6a31524 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +0bf61de45f8ea73a185d48572ea094f6b696a7a8 \ No newline at end of file diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java index 2b74aaa9f1e..b9c628926e3 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ScriptException; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -34,7 +34,7 @@ import java.util.Collections; public class ExpressionTests extends ESSingleNodeTestCase { ExpressionScriptEngineService service; SearchLookup lookup; - + @Override public void setUp() throws Exception { super.setUp(); @@ -42,7 +42,7 @@ public class ExpressionTests extends ESSingleNodeTestCase { service = new ExpressionScriptEngineService(Settings.EMPTY); lookup = new SearchLookup(index.mapperService(), index.fieldData(), null); } - + private SearchScript compile(String expression) { Object compiled = service.compile(null, expression, Collections.emptyMap()); return service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); @@ -54,14 +54,14 @@ 
public class ExpressionTests extends ESSingleNodeTestCase { assertTrue(compile("1/_score").needsScores()); assertTrue(compile("doc['d'].value * _score").needsScores()); } - + public void testCompileError() { ScriptException e = expectThrows(ScriptException.class, () -> { compile("doc['d'].value * *@#)(@$*@#$ + 4"); }); assertTrue(e.getCause() instanceof ParseException); } - + public void testLinkError() { ScriptException e = expectThrows(ScriptException.class, () -> { compile("doc['e'].value * 5"); diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/IndexedExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/IndexedExpressionTests.java index 731e8760e80..71c87937618 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/IndexedExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/IndexedExpressionTests.java @@ -23,8 +23,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -59,7 +58,7 @@ public class IndexedExpressionTests extends ESIntegTestCase { client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get(); try { client().prepareUpdate("test", "scriptTest", "1") - .setScript(new Script("script1", ScriptService.ScriptType.STORED, ExpressionScriptEngineService.NAME, null)).get(); + .setScript(new Script(ScriptType.STORED, ExpressionScriptEngineService.NAME, "script1", Collections.emptyMap())).get(); fail("update script should have been rejected"); } catch(Exception e) { assertThat(e.getMessage(), containsString("failed to execute script")); @@ -68,7 +67,7 @@ public class IndexedExpressionTests extends ESIntegTestCase { try { client().prepareSearch() .setSource( - new SearchSourceBuilder().scriptField("test1", new Script("script1", ScriptType.STORED, "expression", null))) + new SearchSourceBuilder().scriptField("test1", new Script(ScriptType.STORED, "expression", "script1", Collections.emptyMap()))) .setIndices("test").setTypes("scriptTest").get(); fail("search script should have been rejected"); } catch(Exception e) { @@ -78,7 +77,7 @@ public class IndexedExpressionTests extends ESIntegTestCase { client().prepareSearch("test") .setSource( new SearchSourceBuilder().aggregation(AggregationBuilders.terms("test").script( - new Script("script1", ScriptType.STORED, "expression", null)))).get(); + new Script(ScriptType.STORED, "expression", "script1", Collections.emptyMap())))).get(); } catch (Exception e) { assertThat(e.toString(), containsString("scripts of type [stored], operation [aggs] and lang [expression] are disabled")); } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/LangExpressionClientYamlTestSuiteIT.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/LangExpressionClientYamlTestSuiteIT.java index 3d1071ee17c..9a30def83e1 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/LangExpressionClientYamlTestSuiteIT.java +++ 
b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/LangExpressionClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class LangExpressionClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index d71d09f2f37..aa78a9e98ec 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.GeneralScriptException; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -85,7 +85,7 @@ public class MoreExpressionTests extends ESIntegTestCase { req.setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("_uid") .order(SortOrder.ASC)) - .addScriptField("foo", new Script(script, ScriptType.INLINE, "expression", paramsMap)); + .addScriptField("foo", new Script(ScriptType.INLINE, "expression", script, paramsMap)); return req; } @@ -124,7 +124,7 @@ public class MoreExpressionTests extends ESIntegTestCase { client().prepareIndex("test", "doc", "1").setSource("text", "hello goodbye"), client().prepareIndex("test", "doc", "2").setSource("text", "hello hello hello goodbye"), client().prepareIndex("test", "doc", "3").setSource("text", "hello hello goodebye")); - ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction(new Script("1 / _score", ScriptType.INLINE, "expression", null)); + ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction(new Script(ScriptType.INLINE, "expression", "1 / _score", Collections.emptyMap())); SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent @@ -164,7 +164,7 @@ public class MoreExpressionTests extends ESIntegTestCase { assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); } - + public void testDateObjectMethods() throws Exception { ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "date0", "type=date", "date1", "type=date")); ensureGreen("test"); @@ -257,7 +257,7 @@ public class MoreExpressionTests extends ESIntegTestCase { assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); - + // make sure count() works for missing rsp = buildRequest("doc['double2'].count()").get(); assertSearchResponse(rsp); @@ -266,7 +266,7 @@ public class MoreExpressionTests extends 
ESIntegTestCase { assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); - + // make sure .empty works in the same way rsp = buildRequest("doc['double2'].empty ? 5.0 : 2.0").get(); assertSearchResponse(rsp); @@ -429,13 +429,16 @@ public class MoreExpressionTests extends ESIntegTestCase { req.setQuery(QueryBuilders.matchAllQuery()) .addAggregation( AggregationBuilders.stats("int_agg").field("x") - .script(new Script("_value * 3", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))) + .script(new Script(ScriptType.INLINE, + ExpressionScriptEngineService.NAME, "_value * 3", Collections.emptyMap()))) .addAggregation( AggregationBuilders.stats("double_agg").field("y") - .script(new Script("_value - 1.1", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))) + .script(new Script(ScriptType.INLINE, + ExpressionScriptEngineService.NAME, "_value - 1.1", Collections.emptyMap()))) .addAggregation( AggregationBuilders.stats("const_agg").field("x") // specifically to test a script w/o _value - .script(new Script("3.0", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null)) + .script(new Script(ScriptType.INLINE, + ExpressionScriptEngineService.NAME, "3.0", Collections.emptyMap())) ); SearchResponse rsp = req.get(); @@ -469,7 +472,8 @@ public class MoreExpressionTests extends ESIntegTestCase { req.setQuery(QueryBuilders.matchAllQuery()) .addAggregation( AggregationBuilders.terms("term_agg").field("text") - .script(new Script("_value", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))); + .script( + new Script(ScriptType.INLINE, ExpressionScriptEngineService.NAME, "_value", Collections.emptyMap()))); String message; try { @@ -559,7 +563,7 @@ public class MoreExpressionTests extends ESIntegTestCase { UpdateRequestBuilder urb = client().prepareUpdate().setIndex("test_index"); urb.setType("doc"); urb.setId("1"); - urb.setScript(new Script("0", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null)); + urb.setScript(new Script(ScriptType.INLINE, ExpressionScriptEngineService.NAME, "0", Collections.emptyMap())); urb.get(); fail("Expression scripts should not be allowed to run as update scripts."); } catch (Exception e) { @@ -590,7 +594,8 @@ public class MoreExpressionTests extends ESIntegTestCase { .subAggregation(sum("threeSum").field("three")) .subAggregation(sum("fourSum").field("four")) .subAggregation(bucketScript("totalSum", - new Script("_value0 + _value1 + _value2", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null), + new Script(ScriptType.INLINE, + ExpressionScriptEngineService.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), "twoSum", "threeSum", "fourSum"))) .execute().actionGet(); @@ -616,7 +621,7 @@ public class MoreExpressionTests extends ESIntegTestCase { } } } - + public void testGeo() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); @@ -649,7 +654,7 @@ public class MoreExpressionTests extends ESIntegTestCase { assertEquals(1, rsp.getHits().getTotalHits()); assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); } - + public void testBoolean() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("vip").field("type", "boolean"); 
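[Reviewer note, not part of the patch] The lang-expression hunks above all apply the same mechanical migration: ScriptType is now the top-level org.elasticsearch.script.ScriptType rather than the nested ScriptService.ScriptType, and the Script constructor takes the type first, then the language, then the id or inline source, with an explicit params map (Collections.emptyMap()) required where null used to be accepted. A minimal before/after sketch of that call-site change, using an inline expression taken from the hunks above; the wrapper class name is hypothetical and purely illustrative:

    import java.util.Collections;

    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptType;

    class ScriptCtorMigrationSketch {
        Script migrated() {
            // Old form (removed): source first, nested enum, null params tolerated:
            //     new Script("1 / _score", ScriptService.ScriptType.INLINE, "expression", null);
            // New form: type and lang lead, source follows, params must be non-null.
            return new Script(ScriptType.INLINE, "expression", "1 / _score", Collections.emptyMap());
        }
    }

The same ordering applies to FILE and STORED scripts (see the ScriptProcessor and lang-groovy hunks), so a reviewer can check each hunk by confirming the four arguments were reordered rather than altered.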
diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index a762720ff9d..0cd8976c76c 100644 --- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -30,7 +30,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.codehaus.groovy.ast.ClassCodeExpressionTransformer; import org.codehaus.groovy.ast.ClassNode; -import org.codehaus.groovy.ast.Parameter; import org.codehaus.groovy.ast.expr.ConstantExpression; import org.codehaus.groovy.ast.expr.Expression; import org.codehaus.groovy.classgen.GeneratorContext; diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java index b0d5fd3366b..ce362e8d78c 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyIndexedScriptTests.java @@ -28,10 +28,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptService.ScriptType; -import org.elasticsearch.script.groovy.GroovyPlugin; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -93,9 +90,10 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase { .prepareSearch() .setSource( new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(1) - .scriptField("test1", new Script("script1", ScriptType.STORED, GroovyScriptEngineService.NAME, null)) + .scriptField("test1", + new Script(ScriptType.STORED, GroovyScriptEngineService.NAME, "script1", Collections.emptyMap())) .scriptField("test2", - new Script("script2", ScriptType.STORED, GroovyScriptEngineService.NAME, script2Params))) + new Script(ScriptType.STORED, GroovyScriptEngineService.NAME, "script2", script2Params))) .setIndices("test").setTypes("scriptTest").get(); assertHitCount(searchResponse, 5); assertTrue(searchResponse.getHits().hits().length == 1); @@ -121,7 +119,7 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase { .prepareSearch() .setSource( new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).scriptField("test_field", - new Script("script1", ScriptType.STORED, GroovyScriptEngineService.NAME, null))) + new Script(ScriptType.STORED, GroovyScriptEngineService.NAME, "script1", Collections.emptyMap()))) .setIndices("test_index") .setTypes("test_type").get(); assertHitCount(searchResponse, 1); @@ -138,7 +136,7 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase { client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get(); try { client().prepareUpdate("test", "scriptTest", "1") - .setScript(new Script("script1", ScriptService.ScriptType.STORED, GroovyScriptEngineService.NAME, null)).get(); + 
.setScript(new Script(ScriptType.STORED, GroovyScriptEngineService.NAME, "script1", Collections.emptyMap())).get(); fail("update script should have been rejected"); } catch (Exception e) { assertThat(e.getMessage(), containsString("failed to execute script")); @@ -159,7 +157,7 @@ public class GroovyIndexedScriptTests extends ESIntegTestCase { .prepareSearch("test") .setSource( new SearchSourceBuilder().aggregation(AggregationBuilders.terms("test").script( - new Script("script1", ScriptType.STORED, GroovyScriptEngineService.NAME, null)))).get(); + new Script(ScriptType.STORED, GroovyScriptEngineService.NAME, "script1", Collections.emptyMap())))).get(); assertHitCount(searchResponse, 1); assertThat(searchResponse.getAggregations().get("test"), notNullValue()); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java index 88d9b7be1de..196d878ae66 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.elasticsearch.search.sort.SortBuilders; @@ -68,7 +68,7 @@ public class GroovyScriptTests extends ESIntegTestCase { } public void assertScript(String scriptString) { - Script script = new Script(scriptString, ScriptType.INLINE, GroovyScriptEngineService.NAME, null); + Script script = new Script(ScriptType.INLINE, GroovyScriptEngineService.NAME, scriptString, Collections.emptyMap()); SearchResponse resp = client().prepareSearch("test") .setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).sort(SortBuilders. 
scriptSort(script, ScriptSortType.NUMBER))) @@ -85,8 +85,8 @@ public class GroovyScriptTests extends ESIntegTestCase { try { client().prepareSearch("test") .setQuery( - constantScoreQuery(scriptQuery(new Script("1 == not_found", ScriptType.INLINE, GroovyScriptEngineService.NAME, - null)))).get(); + constantScoreQuery(scriptQuery(new Script(ScriptType.INLINE, GroovyScriptEngineService.NAME, "1 == not_found", + Collections.emptyMap())))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { assertThat(e.toString()+ "should not contained NotSerializableTransportException", @@ -100,7 +100,7 @@ public class GroovyScriptTests extends ESIntegTestCase { try { client().prepareSearch("test") .setQuery(constantScoreQuery(scriptQuery( - new Script("null.foo", ScriptType.INLINE, GroovyScriptEngineService.NAME, null)))).get(); + new Script(ScriptType.INLINE, GroovyScriptEngineService.NAME, "null.foo", Collections.emptyMap())))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { assertThat(e.toString() + "should not contained NotSerializableTransportException", @@ -120,7 +120,7 @@ public class GroovyScriptTests extends ESIntegTestCase { // doc[] access SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(scriptFunction( - new Script("doc['bar'].value", ScriptType.INLINE, GroovyScriptEngineService.NAME, null))) + new Script(ScriptType.INLINE, GroovyScriptEngineService.NAME, "doc['bar'].value", Collections.emptyMap()))) .boostMode(CombineFunction.REPLACE)).get(); assertNoFailures(resp); @@ -135,7 +135,7 @@ public class GroovyScriptTests extends ESIntegTestCase { // _score can be accessed SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(matchQuery("foo", "dog"), - scriptFunction(new Script("_score", ScriptType.INLINE, GroovyScriptEngineService.NAME, null))) + scriptFunction(new Script(ScriptType.INLINE, GroovyScriptEngineService.NAME, "_score", Collections.emptyMap()))) .boostMode(CombineFunction.REPLACE)).get(); assertNoFailures(resp); assertSearchHits(resp, "3", "1"); @@ -147,7 +147,8 @@ public class GroovyScriptTests extends ESIntegTestCase { .prepareSearch("test") .setQuery( functionScoreQuery(matchQuery("foo", "dog"), scriptFunction( - new Script("_score > 0.0 ? _score : 0", ScriptType.INLINE, GroovyScriptEngineService.NAME, null))) + new Script(ScriptType.INLINE, + GroovyScriptEngineService.NAME, "_score > 0.0 ? 
_score : 0", Collections.emptyMap()))) .boostMode(CombineFunction.REPLACE)).get(); assertNoFailures(resp); assertSearchHits(resp, "3", "1"); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java index 31dc154a9e2..1ac31a70589 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java @@ -25,7 +25,7 @@ import org.codehaus.groovy.control.MultipleCompilationErrorsException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ScriptException; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import java.nio.file.Path; @@ -139,7 +139,7 @@ public class GroovySecurityTests extends ESTestCase { vars.put("myarray", Arrays.asList("foo")); vars.put("myobject", new MyObject()); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(null, script, Collections.emptyMap())), vars).run(); + se.executable(new CompiledScript(ScriptType.INLINE, "test", "js", se.compile(null, script, Collections.emptyMap())), vars).run(); } public static class MyObject { diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/LangGroovyClientYamlTestSuiteIT.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/LangGroovyClientYamlTestSuiteIT.java index c8e9c74827a..f407b5b6ce7 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/LangGroovyClientYamlTestSuiteIT.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/LangGroovyClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class LangGroovyClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java index 8419730dc1c..662cd86f799 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java @@ -27,6 +27,7 @@ import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheException; import com.github.mustachejava.MustacheVisitor; import com.github.mustachejava.TemplateContext; +import com.github.mustachejava.codes.DefaultMustache; import com.github.mustachejava.codes.IterableCode; import com.github.mustachejava.codes.WriteCode; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -35,33 +36,67 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.io.StringWriter; import java.io.Writer; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.StringJoiner; -import java.util.function.BiConsumer; import 
java.util.function.Function; +import java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; public class CustomMustacheFactory extends DefaultMustacheFactory { - private final BiConsumer<String, Writer> encoder; + static final String CONTENT_TYPE_PARAM = "content_type"; - public CustomMustacheFactory(boolean escaping) { + static final String JSON_MIME_TYPE_WITH_CHARSET = "application/json; charset=UTF-8"; + static final String JSON_MIME_TYPE = "application/json"; + static final String PLAIN_TEXT_MIME_TYPE = "text/plain"; + static final String X_WWW_FORM_URLENCODED_MIME_TYPE = "application/x-www-form-urlencoded"; + + private static final String DEFAULT_MIME_TYPE = JSON_MIME_TYPE; + + private static final Map<String, Supplier<Encoder>> ENCODERS; + static { + Map<String, Supplier<Encoder>> encoders = new HashMap<>(); + encoders.put(JSON_MIME_TYPE_WITH_CHARSET, JsonEscapeEncoder::new); + encoders.put(JSON_MIME_TYPE, JsonEscapeEncoder::new); + encoders.put(PLAIN_TEXT_MIME_TYPE, DefaultEncoder::new); + encoders.put(X_WWW_FORM_URLENCODED_MIME_TYPE, UrlEncoder::new); + ENCODERS = Collections.unmodifiableMap(encoders); + } + + private final Encoder encoder; + + public CustomMustacheFactory(String mimeType) { super(); setObjectHandler(new CustomReflectionObjectHandler()); - if (escaping) { - this.encoder = new JsonEscapeEncoder(); - } else { - this.encoder = new NoEscapeEncoder(); - } + this.encoder = createEncoder(mimeType); + } + + public CustomMustacheFactory() { + this(DEFAULT_MIME_TYPE); } @Override public void encode(String value, Writer writer) { - encoder.accept(value, writer); + try { + encoder.encode(value, writer); + } catch (IOException e) { + throw new MustacheException("Unable to encode value", e); + } + } + + static Encoder createEncoder(String mimeType) { + Supplier<Encoder> supplier = ENCODERS.get(mimeType); + if (supplier == null) { + throw new IllegalArgumentException("No encoder found for MIME type [" + mimeType + "]"); + } + return supplier.get(); } @Override @@ -83,6 +118,8 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { list.add(new JoinerCode(templateContext, df, mustache)); } else if (CustomJoinerCode.match(variable)) { list.add(new CustomJoinerCode(templateContext, df, mustache, variable)); + } else if (UrlEncoderCode.match(variable)) { + list.add(new UrlEncoderCode(templateContext, df, mustache, variable)); } else { list.add(new IterableCode(templateContext, df, mustache, variable)); } @@ -253,27 +290,85 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { } } - class NoEscapeEncoder implements BiConsumer<String, Writer> { + /** + * This function encodes a string using the {@link URLEncoder#encode(String, String)} method + * with the UTF-8 charset.
+ */ + static class UrlEncoderCode extends DefaultMustache { + + private static final String CODE = "url"; + private final Encoder encoder; + + public UrlEncoderCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String variable) { + super(tc, df, mustache.getCodes(), variable); + this.encoder = new UrlEncoder(); + } @Override - public void accept(String s, Writer writer) { - try { - writer.write(s); - } catch (IOException e) { - throw new MustacheException("Failed to encode value: " + s); + public Writer run(Writer writer, List scopes) { + if (getCodes() != null) { + for (Code code : getCodes()) { + try (StringWriter capture = new StringWriter()) { + code.execute(capture, scopes); + + String s = capture.toString(); + if (s != null) { + encoder.encode(s, writer); + } + } catch (IOException e) { + throw new MustacheException("Exception while parsing mustache function at line " + tc.line(), e); + } + } } + return writer; + } + + static boolean match(String variable) { + return CODE.equalsIgnoreCase(variable); } } - class JsonEscapeEncoder implements BiConsumer { + @FunctionalInterface + interface Encoder { + /** + * Encodes the {@code s} string and writes it to the {@code writer} {@link Writer}. + * + * @param s The string to encode + * @param writer The {@link Writer} to which the encoded string will be written to + */ + void encode(final String s, final Writer writer) throws IOException; + } + + /** + * Encoder that simply writes the string to the writer without encoding. + */ + static class DefaultEncoder implements Encoder { @Override - public void accept(String s, Writer writer) { - try { - writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); - } catch (IOException e) { - throw new MustacheException("Failed to escape and encode value: " + s); - } + public void encode(String s, Writer writer) throws IOException { + writer.write(s); + } + } + + /** + * Encoder that escapes JSON string values/fields. + */ + static class JsonEscapeEncoder implements Encoder { + + @Override + public void encode(String s, Writer writer) throws IOException { + writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); + } + } + + /** + * Encoder that escapes strings using HTML form encoding + */ + static class UrlEncoder implements Encoder { + + @Override + public void encode(String s, Writer writer) throws IOException { + writer.write(URLEncoder.encode(s, StandardCharsets.UTF_8.name())); } } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index b7d7087373c..08c0e1643bc 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -43,6 +43,8 @@ import java.security.PrivilegedAction; import java.util.Collections; import java.util.Map; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM; + /** * Main entry point handling template registration, compilation and * execution. 
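The encoder changes above replace the old boolean escaping flag with a MIME-type lookup: each supported content type maps to a Supplier of its Encoder, and unknown types fail fast. A condensed, self-contained sketch of that dispatch pattern; the EncoderRegistry name and the toy escaping rules here are illustrative, not part of the change:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    class EncoderRegistry {
        @FunctionalInterface
        interface Encoder { String encode(String s); }

        private static final Map<String, Supplier<Encoder>> ENCODERS = new HashMap<>();
        static {
            // Each content type maps to a factory for its encoder, mirroring the
            // ENCODERS map in CustomMustacheFactory above (toy escaping for brevity).
            ENCODERS.put("application/json", () -> s -> s.replace("\"", "\\\""));
            ENCODERS.put("text/plain", () -> s -> s);
        }

        static Encoder createEncoder(String mimeType) {
            Supplier<Encoder> supplier = ENCODERS.get(mimeType);
            if (supplier == null) {
                // Unknown content_type values fail fast rather than silently defaulting.
                throw new IllegalArgumentException("No encoder found for MIME type [" + mimeType + "]");
            }
            return supplier.get();
        }
    }

Templates compiled without any content_type parameter still get JSON escaping via the no-argument CustomMustacheFactory() constructor, as the createMustacheFactory change in the next hunk shows.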
@@ -55,10 +57,6 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme public static final String NAME = "mustache"; - static final String CONTENT_TYPE_PARAM = "content_type"; - static final String JSON_CONTENT_TYPE = "application/json"; - static final String PLAIN_TEXT_CONTENT_TYPE = "text/plain"; - /** Thread local UTF8StreamWriter to store template execution results in, thread local to save object creation.*/ private static ThreadLocal<SoftReference<UTF8StreamWriter>> utf8StreamWriter = new ThreadLocal<>(); @@ -85,19 +83,21 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme * Compile a template string to (in this case) a Mustache object that can * later be re-used for execution to fill in missing parameter values. * - * @param templateSource - * a string representing the template to compile. + * @param templateSource a string representing the template to compile. * @return a compiled template object for later execution. * */ @Override public Object compile(String templateName, String templateSource, Map<String, String> params) { - final MustacheFactory factory = new CustomMustacheFactory(isJsonEscapingEnabled(params)); + final MustacheFactory factory = createMustacheFactory(params); Reader reader = new FastStringReader(templateSource); return factory.compile(reader, "query-template"); } - private boolean isJsonEscapingEnabled(Map<String, String> params) { - return JSON_CONTENT_TYPE.equals(params.getOrDefault(CONTENT_TYPE_PARAM, JSON_CONTENT_TYPE)); + private CustomMustacheFactory createMustacheFactory(Map<String, String> params) { + if (params == null || params.isEmpty() || params.containsKey(CONTENT_TYPE_PARAM) == false) { + return new CustomMustacheFactory(); + } + return new CustomMustacheFactory(params.get(CONTENT_TYPE_PARAM)); } @Override diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java index a7b8615372f..818e170a90f 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions; @@ -55,13 +54,13 @@ public class RestMultiSearchTemplateAction extends BaseRestHandler { } @Override - public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { if (RestActions.hasBodyContent(request) == false) { throw new ElasticsearchException("request body is required"); } MultiSearchTemplateRequest multiRequest = parseRequest(request, allowExplicitIndex); - client.execute(MultiSearchTemplateAction.INSTANCE, multiRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(MultiSearchTemplateAction.INSTANCE, multiRequest, new RestToXContentListener<>(channel)); } /** diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java
b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java index 74dc6363f3b..c41baae2e69 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java @@ -23,12 +23,13 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; + +import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -45,17 +46,17 @@ public class RestRenderSearchTemplateAction extends BaseRestHandler { } @Override - public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { // Creates the render template request SearchTemplateRequest renderRequest = RestSearchTemplateAction.parse(RestActions.getRestContent(request)); renderRequest.setSimulate(true); String id = request.param("id"); if (id != null) { - renderRequest.setScriptType(ScriptService.ScriptType.STORED); + renderRequest.setScriptType(ScriptType.STORED); renderRequest.setScript(id); } - client.execute(SearchTemplateAction.INSTANCE, renderRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(SearchTemplateAction.INSTANCE, renderRequest, new RestToXContentListener<>(channel)); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index 6d830a21f5a..ed6ed6a050b 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -34,18 +34,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestStatusToXContentListener; import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.search.aggregations.AggregatorParsers; -import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; @@ -61,17 +57,17 @@ public class RestSearchTemplateAction extends BaseRestHandler { request.setScriptParams(parser.map()) , new 
ParseField("params"), ObjectParser.ValueType.OBJECT); PARSER.declareString((request, s) -> { - request.setScriptType(ScriptService.ScriptType.FILE); + request.setScriptType(ScriptType.FILE); request.setScript(s); }, new ParseField("file")); PARSER.declareString((request, s) -> { - request.setScriptType(ScriptService.ScriptType.STORED); + request.setScriptType(ScriptType.STORED); request.setScript(s); }, new ParseField("id")); PARSER.declareBoolean(SearchTemplateRequest::setExplain, new ParseField("explain")); PARSER.declareBoolean(SearchTemplateRequest::setProfile, new ParseField("profile")); PARSER.declareField((parser, request, value) -> { - request.setScriptType(ScriptService.ScriptType.INLINE); + request.setScriptType(ScriptType.INLINE); if (parser.currentToken() == XContentParser.Token.START_OBJECT) { try (XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType())) { request.setScript(builder.copyCurrentStructure(parser).bytes().utf8ToString()); @@ -100,7 +96,7 @@ public class RestSearchTemplateAction extends BaseRestHandler { } @Override - public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { if (RestActions.hasBodyContent(request) == false) { throw new ElasticsearchException("request body is required"); } @@ -113,7 +109,7 @@ public class RestSearchTemplateAction extends BaseRestHandler { SearchTemplateRequest searchTemplateRequest = parse(RestActions.getRestContent(request)); searchTemplateRequest.setRequest(searchRequest); - client.execute(SearchTemplateAction.INSTANCE, searchTemplateRequest, new RestStatusToXContentListener<>(channel)); + return channel -> client.execute(SearchTemplateAction.INSTANCE, searchTemplateRequest, new RestStatusToXContentListener<>(channel)); } public static SearchTemplateRequest parse(BytesReference bytes) throws IOException { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index 1fa7f24de8f..d7ac37f8313 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import java.io.IOException; import java.util.Map; @@ -43,7 +43,7 @@ public class SearchTemplateRequest extends ActionRequest private boolean simulate = false; private boolean explain = false; private boolean profile = false; - private ScriptService.ScriptType scriptType; + private ScriptType scriptType; private String script; private Map scriptParams; @@ -87,11 +87,11 @@ public class SearchTemplateRequest extends ActionRequest this.profile = profile; } - public ScriptService.ScriptType getScriptType() { + public ScriptType getScriptType() { return scriptType; } - public void setScriptType(ScriptService.ScriptType scriptType) { + public void setScriptType(ScriptType scriptType) { this.scriptType = scriptType; } @@ -143,7 +143,7 @@ public class SearchTemplateRequest extends ActionRequest simulate = 
in.readBoolean(); explain = in.readBoolean(); profile = in.readBoolean(); - scriptType = ScriptService.ScriptType.readFrom(in); + scriptType = ScriptType.readFrom(in); script = in.readOptionalString(); if (in.readBoolean()) { scriptParams = in.readMap(); @@ -157,7 +157,7 @@ public class SearchTemplateRequest extends ActionRequest out.writeBoolean(simulate); out.writeBoolean(explain); out.writeBoolean(profile); - ScriptService.ScriptType.writeTo(scriptType, out); + scriptType.writeTo(out); out.writeOptionalString(script); boolean hasParams = scriptParams != null; out.writeBoolean(hasParams); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java index 52f51b7254f..02d27ac79fc 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java @@ -22,7 +22,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import java.util.Map; @@ -63,7 +63,7 @@ public class SearchTemplateRequestBuilder return this; } - public SearchTemplateRequestBuilder setScriptType(ScriptService.ScriptType scriptType) { + public SearchTemplateRequestBuilder setScriptType(ScriptType scriptType) { request.setScriptType(scriptType); return this; } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java index 4a19bcb4b93..3744416a4ef 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TemplateQueryBuilder.java @@ -35,10 +35,8 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import java.io.IOException; import java.util.Collections; @@ -59,12 +57,13 @@ public class TemplateQueryBuilder extends AbstractQueryBuilder params) { - this(new Script(template, scriptType, "mustache", params)); + public TemplateQueryBuilder(String template, ScriptType scriptType, Map params) { + this(new Script(scriptType, "mustache", template, params)); } - public TemplateQueryBuilder(String template, ScriptService.ScriptType scriptType, Map params, XContentType ct) { - this(new Script(template, scriptType, "mustache", params, ct)); + public TemplateQueryBuilder(String template, ScriptType scriptType, Map params, XContentType ct) { + this(new Script(scriptType, "mustache", template, + ct == null ? 
Collections.emptyMap() : Collections.singletonMap(Script.CONTENT_TYPE_OPTION, ct.mediaType()), params)); } TemplateQueryBuilder(Script template) { @@ -120,9 +119,7 @@ public class TemplateQueryBuilder extends AbstractQueryBuilder { @@ -66,8 +67,9 @@ public class TransportSearchTemplateAction extends HandledTransportAction listener) { final SearchTemplateResponse response = new SearchTemplateResponse(); try { - Script script = new Script(request.getScript(), request.getScriptType(), TEMPLATE_LANG, request.getScriptParams()); - ExecutableScript executable = scriptService.executable(script, SEARCH, emptyMap()); + Script script = new Script(request.getScriptType(), TEMPLATE_LANG, request.getScript(), + request.getScriptParams() == null ? Collections.emptyMap() : request.getScriptParams()); + ExecutableScript executable = scriptService.executable(script, SEARCH); BytesReference source = (BytesReference) executable.run(); response.setSource(source); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java new file mode 100644 index 00000000000..c1cba140ff9 --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script.mustache; + +import com.github.mustachejava.Mustache; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.script.ScriptType.INLINE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.JSON_MIME_TYPE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.PLAIN_TEXT_MIME_TYPE; +import static org.elasticsearch.script.mustache.CustomMustacheFactory.X_WWW_FORM_URLENCODED_MIME_TYPE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class CustomMustacheFactoryTests extends ESTestCase { + + public void testCreateEncoder() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder(null)); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type [null]")); + + e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder("")); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type []")); + + e = expectThrows(IllegalArgumentException.class, () -> CustomMustacheFactory.createEncoder("test")); + assertThat(e.getMessage(), equalTo("No encoder found for MIME type [test]")); + + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.JSON_MIME_TYPE_WITH_CHARSET), + instanceOf(CustomMustacheFactory.JsonEscapeEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.JSON_MIME_TYPE), + instanceOf(CustomMustacheFactory.JsonEscapeEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.PLAIN_TEXT_MIME_TYPE), + instanceOf(CustomMustacheFactory.DefaultEncoder.class)); + assertThat(CustomMustacheFactory.createEncoder(CustomMustacheFactory.X_WWW_FORM_URLENCODED_MIME_TYPE), + instanceOf(CustomMustacheFactory.UrlEncoder.class)); + } + + public void testJsonEscapeEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = randomBoolean() ? 
singletonMap(CONTENT_TYPE_PARAM, JSON_MIME_TYPE) : emptyMap(); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "a \"value\"")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"a \\\"value\\\"\"}")); + } + + public void testDefaultEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_MIME_TYPE); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "a \"value\"")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"a \"value\"\"}")); + } + + public void testUrlEncoder() { + final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY); + final Map params = singletonMap(CONTENT_TYPE_PARAM, X_WWW_FORM_URLENCODED_MIME_TYPE); + + Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params); + CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script); + + ExecutableScript executable = engine.executable(compiled, singletonMap("value", "tilde~ AND date:[2016 FROM*]")); + BytesReference result = (BytesReference) executable.run(); + assertThat(result.utf8ToString(), equalTo("{\"field\": \"tilde%7E+AND+date%3A%5B2016+FROM*%5D\"}")); + } +} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/LangMustacheClientYamlTestSuiteIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/LangMustacheClientYamlTestSuiteIT.java index 160327dbab6..377fa870c41 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/LangMustacheClientYamlTestSuiteIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/LangMustacheClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class LangMustacheClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index f5d1a9dd791..91fc4db43dd 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESIntegTestCase; import java.util.Arrays; @@ -74,7 +74,7 @@ public class 
MultiSearchTemplateIT extends ESIntegTestCase { // Search #1 SearchTemplateRequest search1 = new SearchTemplateRequest(); search1.setRequest(new SearchRequest("msearch")); - search1.setScriptType(ScriptService.ScriptType.INLINE); + search1.setScriptType(ScriptType.INLINE); search1.setScript(template); Map params1 = new HashMap<>(); @@ -87,7 +87,7 @@ public class MultiSearchTemplateIT extends ESIntegTestCase { // Search #2 (Simulate is true) SearchTemplateRequest search2 = new SearchTemplateRequest(); search2.setRequest(new SearchRequest("msearch")); - search2.setScriptType(ScriptService.ScriptType.INLINE); + search2.setScriptType(ScriptType.INLINE); search2.setScript(template); search2.setSimulate(true); @@ -101,7 +101,7 @@ public class MultiSearchTemplateIT extends ESIntegTestCase { // Search #3 SearchTemplateRequest search3 = new SearchTemplateRequest(); search3.setRequest(new SearchRequest("msearch")); - search3.setScriptType(ScriptService.ScriptType.INLINE); + search3.setScriptType(ScriptType.INLINE); search3.setScript(template); search3.setSimulate(false); @@ -115,7 +115,7 @@ public class MultiSearchTemplateIT extends ESIntegTestCase { // Search #4 (Fail because of unknown index) SearchTemplateRequest search4 = new SearchTemplateRequest(); search4.setRequest(new SearchRequest("unknown")); - search4.setScriptType(ScriptService.ScriptType.INLINE); + search4.setScriptType(ScriptType.INLINE); search4.setScript(template); Map params4 = new HashMap<>(); @@ -128,7 +128,7 @@ public class MultiSearchTemplateIT extends ESIntegTestCase { // Search #5 (Simulate is true) SearchTemplateRequest search5 = new SearchTemplateRequest(); search5.setRequest(new SearchRequest("msearch")); - search5.setScriptType(ScriptService.ScriptType.INLINE); + search5.setScriptType(ScriptType.INLINE); search5.setScript("{{! 
ignore me }}{\"query\":{\"terms\":{\"group\":[{{#groups}}{{.}},{{/groups}}]}}}"); search5.setSimulate(true); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java index d7807c1a268..e3b633006c5 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.FakeRestRequest; @@ -58,9 +58,9 @@ public class MultiSearchTemplateRequestTests extends ESTestCase { assertNotNull(request.requests().get(1).getScript()); assertNotNull(request.requests().get(2).getScript()); - assertEquals(ScriptService.ScriptType.INLINE, request.requests().get(0).getScriptType()); - assertEquals(ScriptService.ScriptType.INLINE, request.requests().get(1).getScriptType()); - assertEquals(ScriptService.ScriptType.INLINE, request.requests().get(2).getScriptType()); + assertEquals(ScriptType.INLINE, request.requests().get(0).getScriptType()); + assertEquals(ScriptType.INLINE, request.requests().get(1).getScriptType()); + assertEquals(ScriptType.INLINE, request.requests().get(2).getScriptType()); assertEquals("{\"query\":{\"match_{{template}}\":{}}}", request.requests().get(0).getScript()); assertEquals("{\"query\":{\"match_{{template}}\":{}}}", request.requests().get(1).getScript()); assertEquals("{\"query\":{\"match_{{template}}\":{}}}", request.requests().get(2).getScript()); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index 693ada174b9..20211e3935f 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -49,7 +49,7 @@ public class MustacheScriptEngineTests extends ESTestCase { @Before public void setup() { qe = new MustacheScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - factory = new CustomMustacheFactory(true); + factory = new CustomMustacheFactory(); } public void testSimpleParameterReplace() { @@ -59,7 +59,7 @@ public class MustacheScriptEngineTests extends ESTestCase { + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}" + "}}, \"negative_boost\": {{boost_val}} } }}"; Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); - BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", + BytesReference o = (BytesReference) qe.executable(new 
CompiledScript(ScriptType.INLINE, "", "mustache", qe.compile(null, template, compileParams)), vars).run(); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.3 } }}", @@ -71,7 +71,7 @@ public class MustacheScriptEngineTests extends ESTestCase { Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); vars.put("body_val", "\"quick brown\""); - BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", + BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptType.INLINE, "", "mustache", qe.compile(null, template, compileParams)), vars).run(); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"\\\"quick brown\\\"\"}}}, \"negative_boost\": 0.3 } }}", @@ -83,8 +83,8 @@ public class MustacheScriptEngineTests extends ESTestCase { String templateString = "{" + "\"inline\":{\"match_{{template}}\": {}}," + "\"params\":{\"template\":\"all\"}" + "}"; XContentParser parser = XContentFactory.xContent(templateString).createParser(templateString); Script script = Script.parse(parser, new ParseFieldMatcher(false)); - CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, null, "mustache", - qe.compile(null, script.getScript(), Collections.emptyMap())); + CompiledScript compiledScript = new CompiledScript(ScriptType.INLINE, null, "mustache", + qe.compile(null, script.getIdOrCode(), Collections.emptyMap())); ExecutableScript executableScript = qe.executable(compiledScript, script.getParams()); assertThat(((BytesReference) executableScript.run()).utf8ToString(), equalTo("{\"match_all\":{}}")); } @@ -94,8 +94,8 @@ public class MustacheScriptEngineTests extends ESTestCase { + " \"template\":\"all\"," + " \"use_it\": true" + " }" + "}"; XContentParser parser = XContentFactory.xContent(templateString).createParser(templateString); Script script = Script.parse(parser, new ParseFieldMatcher(false)); - CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, null, "mustache", - qe.compile(null, script.getScript(), Collections.emptyMap())); + CompiledScript compiledScript = new CompiledScript(ScriptType.INLINE, null, "mustache", + qe.compile(null, script.getIdOrCode(), Collections.emptyMap())); ExecutableScript executableScript = qe.executable(compiledScript, script.getParams()); assertThat(((BytesReference) executableScript.run()).utf8ToString(), equalTo("{ \"match_all\":{} }")); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java index 9b48afe834a..ba19febfd21 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java @@ -30,6 +30,8 @@ import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -42,9 +44,7 @@ import java.util.Set; import static java.util.Collections.singleton; import static java.util.Collections.singletonMap; import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.script.ScriptService.ScriptType.INLINE; -import static org.elasticsearch.script.mustache.MustacheScriptEngineService.CONTENT_TYPE_PARAM; -import static org.elasticsearch.script.mustache.MustacheScriptEngineService.PLAIN_TEXT_CONTENT_TYPE; +import static org.elasticsearch.script.ScriptType.INLINE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -144,24 +144,6 @@ public class MustacheTests extends ESTestCase { assertThat(bytes.utf8ToString(), both(containsString("foo")).and(containsString("bar"))); } - public void testEscaping() { - // json string escaping enabled: - Mustache mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); - ExecutableScript executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); - BytesReference rawResult = (BytesReference) executableScript.run(); - String result = rawResult.utf8ToString(); - assertThat(result, equalTo("{ \"field1\": \"a \\\"value\\\"\"}")); - - // json string escaping disabled: - mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", - Collections.singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_CONTENT_TYPE)); - compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); - executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); - rawResult = (BytesReference) executableScript.run(); - result = rawResult.utf8ToString(); - assertThat(result, equalTo("{ \"field1\": \"a \"value\"\"}")); - } public void testSizeAccessForCollectionsAndArrays() throws Exception { String[] randomArrayValues = generateRandomStringArray(10, 20, false); @@ -375,6 +357,44 @@ public class MustacheTests extends ESTestCase { assertScript("{{#join delimiter=' and '}}params{{/join delimiter=' and '}}", params, equalTo("1 and 2 and 3 and 4")); } + public void testUrlEncoder() { + Map<String, String> urls = new HashMap<>(); + urls.put("https://www.elastic.co", + "https%3A%2F%2Fwww.elastic.co"); + urls.put("<logstash-{now/d}>", + "%3Clogstash-%7Bnow%2Fd%7D%3E"); + urls.put("?query=(foo:A OR baz:B) AND title:/joh?n(ath[oa]n)/ AND date:{* TO 2012-01}", + "%3Fquery%3D%28foo%3AA+OR+baz%3AB%29+AND+title%3A%2Fjoh%3Fn%28ath%5Boa%5Dn%29%2F+AND+date%3A%7B*+TO+2012-01%7D"); + + for (Map.Entry<String, String> url : urls.entrySet()) { + assertScript("{{#url}}{{params}}{{/url}}", singletonMap("params", url.getKey()), equalTo(url.getValue())); + } + } + + public void testUrlEncoderWithParam() throws Exception { + assertScript("{{#url}}{{index}}{{/url}}", singletonMap("index", "<logstash-{now/d{YYYY.MM.dd|+12:00}}>"), + equalTo("%3Clogstash-%7Bnow%2Fd%7BYYYY.MM.dd%7C%2B12%3A00%7D%7D%3E")); + + final String random = randomAsciiOfLength(10); + assertScript("{{#url}}prefix_{{s}}{{/url}}", singletonMap("s", random), + equalTo("prefix_" + URLEncoder.encode(random, StandardCharsets.UTF_8.name()))); + } + + public void testUrlEncoderWithJoin() { + Map<String, Object> params = singletonMap("emails", Arrays.asList("john@smith.com", "john.smith@email.com", "jsmith@email.com")); + assertScript("?query={{#url}}{{#join}}emails{{/join}}{{/url}}", params, + equalTo("?query=john%40smith.com%2Cjohn.smith%40email.com%2Cjsmith%40email.com")); + + params = singletonMap("indices", new String[]{"<logstash-{now/d-2d}>", "<logstash-{now/d-1d}>", "<logstash-{now/d}>"});
assertScript("{{#url}}https://localhost:9200/{{#join}}indices{{/join}}/_stats{{/url}}", params, + equalTo("https%3A%2F%2Flocalhost%3A9200%2F%3Clogstash-%7Bnow%2Fd-2d%7D" + + "%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E%2F_stats")); + + params = singletonMap("fibonacci", new int[]{1, 1, 2, 3, 5, 8, 13, 21, 34, 55}); + assertScript("{{#url}}{{#join delimiter='+'}}fibonacci{{/join delimiter='+'}}{{/url}}", params, + equalTo("1%2B1%2B2%2B3%2B5%2B8%2B13%2B21%2B34%2B55")); + } + private void assertScript(String script, Map vars, Matcher matcher) { Object result = engine.executable(new CompiledScript(INLINE, "inline", "mustache", compile(script)), vars).run(); assertThat(result, notNullValue()); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index c3656029bc4..fb67c561b83 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -25,8 +25,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Before; @@ -269,14 +268,14 @@ public class SearchTemplateIT extends ESSingleNodeTestCase { templateParams.put("fieldParam", "bar"); searchResponse = new SearchTemplateRequestBuilder(client()) .setRequest(new SearchRequest("test").types("type")) - .setScript("/mustache/2").setScriptType(ScriptService.ScriptType.STORED).setScriptParams(templateParams) + .setScript("/mustache/2").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get(); assertHitCount(searchResponse.getResponse(), 1); Map vars = new HashMap<>(); vars.put("fieldParam", "bar"); - TemplateQueryBuilder builder = new TemplateQueryBuilder("3", ScriptService.ScriptType.STORED, vars); + TemplateQueryBuilder builder = new TemplateQueryBuilder("3", ScriptType.STORED, vars); SearchResponse sr = client().prepareSearch().setQuery(builder) .execute().actionGet(); assertHitCount(sr, 1); @@ -309,7 +308,7 @@ public class SearchTemplateIT extends ESSingleNodeTestCase { ParsingException e = expectThrows(ParsingException.class, () -> new SearchTemplateRequestBuilder(client()) .setRequest(new SearchRequest("testindex").types("test")) - .setScript("git01").setScriptType(ScriptService.ScriptType.STORED).setScriptParams(templateParams) + .setScript("git01").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get()); assertThat(e.getMessage(), containsString("[match] query does not support type ooophrase_prefix")); @@ -321,7 +320,7 @@ public class SearchTemplateIT extends ESSingleNodeTestCase { SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()) .setRequest(new SearchRequest("testindex").types("test")) - .setScript("git01").setScriptType(ScriptService.ScriptType.STORED).setScriptParams(templateParams) + .setScript("git01").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get(); assertHitCount(searchResponse.getResponse(), 1); } @@ -350,7 +349,7 @@ public class SearchTemplateIT extends ESSingleNodeTestCase { 
SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()) .setRequest(new SearchRequest("test").types("type")) - .setScript("/mustache/4").setScriptType(ScriptService.ScriptType.STORED).setScriptParams(arrayTemplateParams) + .setScript("/mustache/4").setScriptType(ScriptType.STORED).setScriptParams(arrayTemplateParams) .get(); assertHitCount(searchResponse.getResponse(), 5); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java index b5831a3c5a8..528f87f1dd2 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -52,7 +52,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("{\"query\":{\"terms\":{\"status\":[\"{{#status}}\",\"{{.}}\",\"{{/status}}\"]}}}")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.INLINE)); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); assertThat(request.getScriptParams(), nullValue()); } @@ -71,7 +71,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{my_field}}\":\"{{my_value}}\"}},\"size\":\"{{my_size}}\"}")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.INLINE)); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); assertThat(request.getScriptParams().size(), equalTo(3)); assertThat(request.getScriptParams(), hasEntry("my_field", "foo")); assertThat(request.getScriptParams(), hasEntry("my_value", "bar")); @@ -83,7 +83,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("{\"query\":{\"bool\":{\"must\":{\"match\":{\"foo\":\"{{text}}\"}}}}}")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.INLINE)); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); assertThat(request.getScriptParams(), nullValue()); } @@ -94,7 +94,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{field}}\":\"{{value}}\"}}}")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.INLINE)); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); assertThat(request.getScriptParams().size(), equalTo(1)); assertThat(request.getScriptParams(), hasKey("status")); assertThat((List) request.getScriptParams().get("status"), hasItems("pending", "published")); @@ -105,7 +105,7 @@ public class 
SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("fileTemplate")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.FILE)); + assertThat(request.getScriptType(), equalTo(ScriptType.FILE)); assertThat(request.getScriptParams(), nullValue()); } @@ -114,7 +114,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("template_foo")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.FILE)); + assertThat(request.getScriptType(), equalTo(ScriptType.FILE)); assertThat(request.getScriptParams().size(), equalTo(2)); assertThat(request.getScriptParams(), hasEntry("foo", "bar")); assertThat(request.getScriptParams(), hasEntry("size", 500)); @@ -125,7 +125,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("storedTemplate")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.STORED)); + assertThat(request.getScriptType(), equalTo(ScriptType.STORED)); assertThat(request.getScriptParams(), nullValue()); } @@ -134,7 +134,7 @@ public class SearchTemplateRequestTests extends ESTestCase { SearchTemplateRequest request = RestSearchTemplateAction.parse(newBytesReference(source)); assertThat(request.getScript(), equalTo("another_template")); - assertThat(request.getScriptType(), equalTo(ScriptService.ScriptType.STORED)); + assertThat(request.getScriptType(), equalTo(ScriptType.STORED)); assertThat(request.getScriptParams().size(), equalTo(1)); assertThat(request.getScriptParams(), hasEntry("bar", "foo")); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java index dd5c0a18328..7bff3f59842 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.script.mustache; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -36,8 +35,10 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; +import org.junit.After; import org.junit.Before; import java.io.IOException; @@ -48,6 +49,8 @@ import java.util.HashMap; import java.util.Map; import java.util.function.Function; +import static org.hamcrest.Matchers.containsString; + public class TemplateQueryBuilderTests extends AbstractQueryTestCase { /** @@ -55,6 +58,14 @@ public class 
TemplateQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { return Arrays.asList(MustachePlugin.class, CustomScriptPlugin.class); @@ -104,12 +115,13 @@ public class TemplateQueryBuilderTests extends AbstractQueryTestCase + { + "inline": { + "query": { + "match": { + "url": "https://localhost:9200/{{#url}}{{index}}{{/url}}/{{#url}}{{type}}{{/url}}/_search" + } + } + }, + "params": { + "index": "", + "type" : "métriques" + } + } + + - match: { template_output.query.match.url: "https://localhost:9200/%3Clogstash-%7Bnow%2Fd-2d%7D%3E/m%C3%A9triques/_search" } + +--- +"Rendering using {{url}} and {{join}} functions": + + - do: + render_search_template: + body: > + { + "inline": { + "query": { + "match": { + "url": "{{#url}}https://localhost:9200/{{#join}}indices{{/join}}/_stats{{/url}}" + } + } + }, + "params": { + "indices": ["", "", ""] + } + } + + # Decoded URL is https://localhost:9200/,,/_stats + - match: { template_output.query.match.url: "https%3A%2F%2Flocalhost%3A9200%2F%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E%2F_stats" } diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml index 2360dfc37f0..cfa97b8bc9f 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml @@ -44,7 +44,7 @@ warnings: - '[template] query is deprecated, use search template api instead' search: - body: { "query": { "template": { "id": "1", "params": { "my_value": "value1" } } } } + body: { "query": { "template": { "stored": "1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } @@ -52,7 +52,7 @@ warnings: - '[template] query is deprecated, use search template api instead' search: - body: { "query": { "template": { "id": "/mustache/1", "params": { "my_value": "value1" } } } } + body: { "query": { "template": { "stored": "/mustache/1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index 42876b18f0d..cca96e65b8b 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -156,11 +156,11 @@ postdot ; callinvoke - : DOT DOTID arguments + : COND? DOT DOTID arguments ; fieldaccess - : DOT ( DOTID | DOTINTEGER ) + : COND? 
DOT ( DOTID | DOTINTEGER ) ; braceaccess diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java index 864b44fb766..b3c1a3caea1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java @@ -30,6 +30,9 @@ import org.elasticsearch.painless.Definition.Type; public final class AnalyzerCaster { public static Cast getLegalCast(Location location, Type actual, Type expected, boolean explicit, boolean internal) { + if (actual == null || expected == null) { + throw new IllegalStateException("Neither actual [" + actual + "] nor expected [" + expected + "] can be null"); + } if (actual.equals(expected)) { return null; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index cd761d0ad44..6de116da0e9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -109,6 +109,10 @@ public final class Def { private static final MethodHandle LIST_SET; /** pointer to Iterable.iterator() */ private static final MethodHandle ITERATOR; + /** pointer to {@link Def#mapIndexNormalize}. */ + private static final MethodHandle MAP_INDEX_NORMALIZE; + /** pointer to {@link Def#listIndexNormalize}. */ + private static final MethodHandle LIST_INDEX_NORMALIZE; /** factory for arraylength MethodHandle (intrinsic) from Java 9 */ private static final MethodHandle JAVA9_ARRAY_LENGTH_MH_FACTORY; @@ -121,6 +125,10 @@ public final class Def { LIST_GET = lookup.findVirtual(List.class, "get", MethodType.methodType(Object.class, int.class)); LIST_SET = lookup.findVirtual(List.class, "set", MethodType.methodType(Object.class, int.class, Object.class)); ITERATOR = lookup.findVirtual(Iterable.class, "iterator", MethodType.methodType(Iterator.class)); + MAP_INDEX_NORMALIZE = lookup.findStatic(Def.class, "mapIndexNormalize", + MethodType.methodType(Object.class, Map.class, Object.class)); + LIST_INDEX_NORMALIZE = lookup.findStatic(Def.class, "listIndexNormalize", + MethodType.methodType(int.class, List.class, int.class)); } catch (final ReflectiveOperationException roe) { throw new AssertionError(roe); } @@ -522,6 +530,26 @@ public final class Def { "for class [" + receiverClass.getCanonicalName() + "]."); } + /** + * Returns a method handle to normalize the index into an array. This is what makes lists and arrays stored in {@code def} support + * negative offsets. 
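+ * For example, a negative index flips around to count from the end of the receiver: {@code x[-1]} on a list or array loads the last element, because the normalizer rewrites {@code -1} to {@code length - 1}.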
+ * @param receiverClass Class of the receiver (array, {@code List}, or {@code Map}) whose index should be normalized + * @return a MethodHandle that accepts the receiver as first argument, the index as second argument, and returns the normalized index + * to use with array loads and array stores + */ + static MethodHandle lookupIndexNormalize(Class receiverClass) { + if (receiverClass.isArray()) { + return ArrayIndexNormalizeHelper.arrayIndexNormalizer(receiverClass); + } else if (Map.class.isAssignableFrom(receiverClass)) { + // noop so that mymap[key] doesn't do funny things with negative keys + return MAP_INDEX_NORMALIZE; + } else if (List.class.isAssignableFrom(receiverClass)) { + return LIST_INDEX_NORMALIZE; + } + throw new IllegalArgumentException("Attempting to address a non-array-like type " + + "[" + receiverClass.getCanonicalName() + "] as an array."); + } + /** * Returns a method handle to do an array store. * @param receiverClass Class of the array to store the value in @@ -814,4 +842,62 @@ public final class Def { return ((Number)value).doubleValue(); } } + + /** + * "Normalizes" the index into a {@code Map} by making no change to the index. + */ + public static Object mapIndexNormalize(final Map value, Object index) { + return index; + } + + /** + * "Normalizes" the index into a {@code List} by flipping negative indexes around so they are "from the end" of the list. + */ + public static int listIndexNormalize(final List value, int index) { + return index >= 0 ? index : value.size() + index; + } + + /** + * Methods to normalize array indices to support negative indices into arrays stored in {@code def}s. + */ + @SuppressWarnings("unused") // normalizeIndex() methods are actually used, javac just does not know :) + private static final class ArrayIndexNormalizeHelper { + private static final Lookup PRIV_LOOKUP = MethodHandles.lookup(); + + private static final Map<Class<?>,MethodHandle> ARRAY_TYPE_MH_MAPPING = Collections.unmodifiableMap( + Stream.of(boolean[].class, byte[].class, short[].class, int[].class, long[].class, + char[].class, float[].class, double[].class, Object[].class) + .collect(Collectors.toMap(Function.identity(), type -> { + try { + return PRIV_LOOKUP.findStatic(PRIV_LOOKUP.lookupClass(), "normalizeIndex", + MethodType.methodType(int.class, type, int.class)); + } catch (ReflectiveOperationException e) { + throw new AssertionError(e); + } + })) + ); + + private static final MethodHandle OBJECT_ARRAY_MH = ARRAY_TYPE_MH_MAPPING.get(Object[].class); + + static int normalizeIndex(final boolean[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final byte[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final short[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final int[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final long[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final char[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final float[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final double[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final Object[] array, final int index) { return index >= 0 ?
index : index + array.length; } + + static MethodHandle arrayIndexNormalizer(Class arrayType) { + if (!arrayType.isArray()) { + throw new IllegalArgumentException("type must be an array"); + } + return (ARRAY_TYPE_MH_MAPPING.containsKey(arrayType)) ? + ARRAY_TYPE_MH_MAPPING.get(arrayType) : + OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); + } + + private ArrayIndexNormalizeHelper() {} + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java index 9640629cb87..307316efdf4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java @@ -32,9 +32,10 @@ import java.lang.invoke.WrongMethodTypeException; /** * Painless invokedynamic bootstrap for the call site. *

    - * Has 7 flavors (passed as static bootstrap parameters): dynamic method call, + * Has 11 flavors (passed as static bootstrap parameters): dynamic method call, * dynamic field load (getter), and dynamic field store (setter), dynamic array load, - * dynamic array store, iterator, and method reference. + * dynamic array store, iterator, method reference, unary operator, binary operator, + * shift operator, and dynamic array index normalize. *
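+ * The new INDEX_NORMALIZE flavor backs negative indexing on arrays and lists stored in {@code def}: its call sites receive the receiver and the raw index and return the index that should actually be loaded or stored.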

    * When a new type is encountered at the call site, we lookup from the appropriate * whitelist, and cache with a guard. If we encounter too many types, we stop caching. @@ -69,6 +70,8 @@ public final class DefBootstrap { public static final int BINARY_OPERATOR = 8; /** static bootstrap parameter indicating a shift operator, e.g. foo >> bar */ public static final int SHIFT_OPERATOR = 9; + /** static bootstrap parameter indicating a request to normalize an index for array-like-access */ + public static final int INDEX_NORMALIZE = 10; // constants for the flags parameter of operators /** @@ -152,6 +155,8 @@ public final class DefBootstrap { return Def.lookupIterator(receiver); case REFERENCE: return Def.lookupReference(lookup, (String) args[0], receiver, name); + case INDEX_NORMALIZE: + return Def.lookupIndexNormalize(receiver); default: throw new AssertionError(); } } @@ -448,6 +453,7 @@ public final class DefBootstrap { case ARRAY_LOAD: case ARRAY_STORE: case ITERATOR: + case INDEX_NORMALIZE: if (args.length > 0) { throw new BootstrapMethodError("Illegal static bootstrap parameters for flavor: " + flavor); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index 43fd54c51a4..7e56bf49156 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -30,7 +30,6 @@ import org.objectweb.asm.commons.Method; import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Arrays; import java.util.BitSet; import java.util.Deque; import java.util.List; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index c546207b1ee..684f9a59ee2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -32,6 +32,7 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.util.BitSet; +import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.Objects; @@ -112,6 +113,7 @@ public final class WriterConstants { public static final Method DEF_TO_LONG_EXPLICIT = getAsmMethod(long.class , "DefTolongExplicit" , Object.class); public static final Method DEF_TO_FLOAT_EXPLICIT = getAsmMethod(float.class , "DefTofloatExplicit" , Object.class); public static final Method DEF_TO_DOUBLE_EXPLICIT = getAsmMethod(double.class , "DefTodoubleExplicit", Object.class); + public static final Type DEF_ARRAY_LENGTH_METHOD_TYPE = Type.getMethodType(Type.INT_TYPE, Definition.DEF_TYPE.type); /** invokedynamic bootstrap for lambda expression/method references */ public static final MethodType LAMBDA_BOOTSTRAP_TYPE = @@ -158,6 +160,9 @@ public final class WriterConstants { public static final Type OBJECTS_TYPE = Type.getType(Objects.class); public static final Method EQUALS = getAsmMethod(boolean.class, "equals", Object.class, Object.class); + public static final Type COLLECTION_TYPE = Type.getType(Collection.class); + public static final Method COLLECTION_SIZE = getAsmMethod(int.class, "size"); + private static Method getAsmMethod(final Class rtype, final String name, final Class... 
ptypes) { return new Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString()); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index 8766dc9f89c..964ef714838 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -2558,6 +2558,7 @@ class PainlessParser extends Parser { public ArgumentsContext arguments() { return getRuleContext(ArgumentsContext.class,0); } + public TerminalNode COND() { return getToken(PainlessParser.COND, 0); } public CallinvokeContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -2572,14 +2573,24 @@ class PainlessParser extends Parser { public final CallinvokeContext callinvoke() throws RecognitionException { CallinvokeContext _localctx = new CallinvokeContext(_ctx, getState()); enterRule(_localctx, 40, RULE_callinvoke); + int _la; try { enterOuterAlt(_localctx, 1); { - setState(354); - match(DOT); setState(355); + _la = _input.LA(1); + if (_la==COND) { + { + setState(354); + match(COND); + } + } + + setState(357); + match(DOT); + setState(358); match(DOTID); - setState(356); + setState(359); arguments(); } } @@ -2598,6 +2609,7 @@ class PainlessParser extends Parser { public TerminalNode DOT() { return getToken(PainlessParser.DOT, 0); } public TerminalNode DOTID() { return getToken(PainlessParser.DOTID, 0); } public TerminalNode DOTINTEGER() { return getToken(PainlessParser.DOTINTEGER, 0); } + public TerminalNode COND() { return getToken(PainlessParser.COND, 0); } public FieldaccessContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -2616,9 +2628,18 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(358); + setState(362); + _la = _input.LA(1); + if (_la==COND) { + { + setState(361); + match(COND); + } + } + + setState(364); match(DOT); - setState(359); + setState(365); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2661,11 +2682,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(361); + setState(367); match(LBRACE); - setState(362); + setState(368); expression(0); - setState(363); + setState(369); match(RBRACE); } } @@ -2762,17 +2783,17 @@ class PainlessParser extends Parser { int _la; try { int _alt; - setState(409); - switch ( getInterpreter().adaptivePredict(_input,34,_ctx) ) { + setState(415); + switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { case 1: _localctx = new NewstandardarrayContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(365); + setState(371); match(NEW); - setState(366); + setState(372); match(TYPE); - setState(371); + setState(377); _errHandler.sync(this); _alt = 1; do { @@ -2780,11 +2801,11 @@ class PainlessParser extends Parser { case 1: { { - setState(367); + setState(373); match(LBRACE); - setState(368); + setState(374); expression(0); - setState(369); + setState(375); match(RBRACE); } } @@ -2792,31 +2813,31 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(373); + setState(379); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,27,_ctx); + _alt = getInterpreter().adaptivePredict(_input,29,_ctx); } while ( _alt!=2 && 
_alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(382); - switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + setState(388); + switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) { case 1: { - setState(375); + setState(381); postdot(); - setState(379); + setState(385); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,28,_ctx); + _alt = getInterpreter().adaptivePredict(_input,30,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(376); + setState(382); postfix(); } } } - setState(381); + setState(387); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,28,_ctx); + _alt = getInterpreter().adaptivePredict(_input,30,_ctx); } } break; @@ -2827,67 +2848,67 @@ class PainlessParser extends Parser { _localctx = new NewinitializedarrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(384); + setState(390); match(NEW); - setState(385); + setState(391); match(TYPE); - setState(386); + setState(392); match(LBRACE); - setState(387); + setState(393); match(RBRACE); - setState(388); + setState(394); match(LBRACK); - setState(397); + setState(403); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OCTAL - 70)) | (1L << (HEX - 70)) | (1L << (INTEGER - 70)) | (1L << (DECIMAL - 70)) | (1L << (STRING - 70)) | (1L << (REGEX - 70)) | (1L << (TRUE - 70)) | (1L << (FALSE - 70)) | (1L << (NULL - 70)) | (1L << (TYPE - 70)) | (1L << (ID - 70)))) != 0)) { { - setState(389); + setState(395); expression(0); - setState(394); + setState(400); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(390); + setState(396); match(COMMA); - setState(391); + setState(397); expression(0); } } - setState(396); + setState(402); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(400); + setState(406); _la = _input.LA(1); if (_la==SEMICOLON) { { - setState(399); + setState(405); match(SEMICOLON); } } - setState(402); + setState(408); match(RBRACK); - setState(406); + setState(412); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,33,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(403); + setState(409); postfix(); } } } - setState(408); + setState(414); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,33,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); } } break; @@ -2933,41 +2954,41 @@ class PainlessParser extends Parser { enterRule(_localctx, 48, RULE_listinitializer); int _la; try { - setState(424); - switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { + setState(430); + switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(411); - match(LBRACE); - setState(412); - expression(0); setState(417); + match(LBRACE); + setState(418); + expression(0); + setState(423); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(413); + setState(419); match(COMMA); - setState(414); + setState(420); expression(0); } } - setState(419); + setState(425); _errHandler.sync(this); _la = _input.LA(1); } - setState(420); + setState(426); match(RBRACE); } break; 
case 2: enterOuterAlt(_localctx, 2); { - setState(422); + setState(428); match(LBRACE); - setState(423); + setState(429); match(RBRACE); } break; @@ -3014,43 +3035,43 @@ class PainlessParser extends Parser { enterRule(_localctx, 50, RULE_mapinitializer); int _la; try { - setState(440); - switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) { + setState(446); + switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(426); - match(LBRACE); - setState(427); - maptoken(); setState(432); + match(LBRACE); + setState(433); + maptoken(); + setState(438); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(428); + setState(434); match(COMMA); - setState(429); + setState(435); maptoken(); } } - setState(434); + setState(440); _errHandler.sync(this); _la = _input.LA(1); } - setState(435); + setState(441); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(437); + setState(443); match(LBRACE); - setState(438); + setState(444); match(COLON); - setState(439); + setState(445); match(RBRACE); } break; @@ -3092,11 +3113,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(442); + setState(448); expression(0); - setState(443); + setState(449); match(COLON); - setState(444); + setState(450); expression(0); } } @@ -3143,34 +3164,34 @@ class PainlessParser extends Parser { enterOuterAlt(_localctx, 1); { { - setState(446); + setState(452); match(LP); - setState(455); + setState(461); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << THIS) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OCTAL - 70)) | (1L << (HEX - 70)) | (1L << (INTEGER - 70)) | (1L << (DECIMAL - 70)) | (1L << (STRING - 70)) | (1L << (REGEX - 70)) | (1L << (TRUE - 70)) | (1L << (FALSE - 70)) | (1L << (NULL - 70)) | (1L << (TYPE - 70)) | (1L << (ID - 70)))) != 0)) { { - setState(447); + setState(453); argument(); - setState(452); + setState(458); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(448); + setState(454); match(COMMA); - setState(449); + setState(455); argument(); } } - setState(454); + setState(460); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(457); + setState(463); match(RP); } } @@ -3211,26 +3232,26 @@ class PainlessParser extends Parser { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); enterRule(_localctx, 56, RULE_argument); try { - setState(462); - switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + setState(468); + switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(459); + setState(465); expression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(460); + setState(466); lambda(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(461); + setState(467); funcref(); } break; @@ -3285,58 +3306,58 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(477); + setState(483); switch (_input.LA(1)) { case TYPE: case ID: { - setState(464); + setState(470); lamtype(); } break; case LP: { - setState(465); + setState(471); match(LP); - setState(474); + setState(480); _la = _input.LA(1); if (_la==TYPE || _la==ID) { { - setState(466); + setState(472); lamtype(); - setState(471); + setState(477); _errHandler.sync(this); 
_la = _input.LA(1); while (_la==COMMA) { { { - setState(467); + setState(473); match(COMMA); - setState(468); + setState(474); lamtype(); } } - setState(473); + setState(479); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(476); + setState(482); match(RP); } break; default: throw new NoViableAltException(this); } - setState(479); + setState(485); match(ARROW); - setState(482); + setState(488); switch (_input.LA(1)) { case LBRACK: { - setState(480); + setState(486); block(); } break; @@ -3361,7 +3382,7 @@ class PainlessParser extends Parser { case TYPE: case ID: { - setState(481); + setState(487); expression(0); } break; @@ -3404,16 +3425,16 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(485); + setState(491); _la = _input.LA(1); if (_la==TYPE) { { - setState(484); + setState(490); decltype(); } } - setState(487); + setState(493); match(ID); } } @@ -3492,17 +3513,17 @@ class PainlessParser extends Parser { FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); enterRule(_localctx, 62, RULE_funcref); try { - setState(502); - switch ( getInterpreter().adaptivePredict(_input,47,_ctx) ) { + setState(508); + switch ( getInterpreter().adaptivePredict(_input,49,_ctx) ) { case 1: _localctx = new ClassfuncrefContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(489); + setState(495); match(TYPE); - setState(490); + setState(496); match(REF); - setState(491); + setState(497); match(ID); } break; @@ -3510,11 +3531,11 @@ class PainlessParser extends Parser { _localctx = new ConstructorfuncrefContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(492); + setState(498); decltype(); - setState(493); + setState(499); match(REF); - setState(494); + setState(500); match(NEW); } break; @@ -3522,11 +3543,11 @@ class PainlessParser extends Parser { _localctx = new CapturingfuncrefContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(496); + setState(502); match(ID); - setState(497); + setState(503); match(REF); - setState(498); + setState(504); match(ID); } break; @@ -3534,11 +3555,11 @@ class PainlessParser extends Parser { _localctx = new LocalfuncrefContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(499); + setState(505); match(THIS); - setState(500); + setState(506); match(REF); - setState(501); + setState(507); match(ID); } break; @@ -3606,7 +3627,7 @@ class PainlessParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3T\u01fb\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3T\u0201\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -3632,173 +3653,176 @@ class PainlessParser extends Parser { "\3\22\7\22\u0140\n\22\f\22\16\22\u0143\13\22\3\22\5\22\u0146\n\22\3\23"+ "\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23"+ "\3\23\3\23\3\23\5\23\u015a\n\23\3\24\3\24\3\24\5\24\u015f\n\24\3\25\3"+ - "\25\5\25\u0163\n\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30"+ - "\3\30\3\31\3\31\3\31\3\31\3\31\3\31\6\31\u0176\n\31\r\31\16\31\u0177\3"+ - "\31\3\31\7\31\u017c\n\31\f\31\16\31\u017f\13\31\5\31\u0181\n\31\3\31\3"+ - "\31\3\31\3\31\3\31\3\31\3\31\3\31\7\31\u018b\n\31\f\31\16\31\u018e\13"+ - "\31\5\31\u0190\n\31\3\31\5\31\u0193\n\31\3\31\3\31\7\31\u0197\n\31\f\31"+ - 
"\16\31\u019a\13\31\5\31\u019c\n\31\3\32\3\32\3\32\3\32\7\32\u01a2\n\32"+ - "\f\32\16\32\u01a5\13\32\3\32\3\32\3\32\3\32\5\32\u01ab\n\32\3\33\3\33"+ - "\3\33\3\33\7\33\u01b1\n\33\f\33\16\33\u01b4\13\33\3\33\3\33\3\33\3\33"+ - "\3\33\5\33\u01bb\n\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\7\35\u01c5"+ - "\n\35\f\35\16\35\u01c8\13\35\5\35\u01ca\n\35\3\35\3\35\3\36\3\36\3\36"+ - "\5\36\u01d1\n\36\3\37\3\37\3\37\3\37\3\37\7\37\u01d8\n\37\f\37\16\37\u01db"+ - "\13\37\5\37\u01dd\n\37\3\37\5\37\u01e0\n\37\3\37\3\37\3\37\5\37\u01e5"+ - "\n\37\3 \5 \u01e8\n \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\5!\u01f9"+ - "\n!\3!\2\3\36\"\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62"+ - "\64\668:<>@\2\16\3\3\r\r\3\2\37!\3\2\"#\3\289\3\2$&\3\2\'*\3\2+.\3\2<"+ - "G\3\2:;\4\2\35\36\"#\3\2HK\3\2ST\u0233\2E\3\2\2\2\4P\3\2\2\2\6U\3\2\2"+ - "\2\b\u00bb\3\2\2\2\n\u00bf\3\2\2\2\f\u00c1\3\2\2\2\16\u00ca\3\2\2\2\20"+ - "\u00ce\3\2\2\2\22\u00d0\3\2\2\2\24\u00d2\3\2\2\2\26\u00db\3\2\2\2\30\u00e3"+ - "\3\2\2\2\32\u00e8\3\2\2\2\34\u00ef\3\2\2\2\36\u00f1\3\2\2\2 \u0133\3\2"+ - "\2\2\"\u0145\3\2\2\2$\u0159\3\2\2\2&\u015e\3\2\2\2(\u0162\3\2\2\2*\u0164"+ - "\3\2\2\2,\u0168\3\2\2\2.\u016b\3\2\2\2\60\u019b\3\2\2\2\62\u01aa\3\2\2"+ - "\2\64\u01ba\3\2\2\2\66\u01bc\3\2\2\28\u01c0\3\2\2\2:\u01d0\3\2\2\2<\u01df"+ - "\3\2\2\2>\u01e7\3\2\2\2@\u01f8\3\2\2\2BD\5\4\3\2CB\3\2\2\2DG\3\2\2\2E"+ - "C\3\2\2\2EF\3\2\2\2FK\3\2\2\2GE\3\2\2\2HJ\5\b\5\2IH\3\2\2\2JM\3\2\2\2"+ - "KI\3\2\2\2KL\3\2\2\2LN\3\2\2\2MK\3\2\2\2NO\7\2\2\3O\3\3\2\2\2PQ\5\26\f"+ - "\2QR\7R\2\2RS\5\6\4\2ST\5\f\7\2T\5\3\2\2\2Ua\7\t\2\2VW\5\26\f\2W^\7R\2"+ - "\2XY\7\f\2\2YZ\5\26\f\2Z[\7R\2\2[]\3\2\2\2\\X\3\2\2\2]`\3\2\2\2^\\\3\2"+ - "\2\2^_\3\2\2\2_b\3\2\2\2`^\3\2\2\2aV\3\2\2\2ab\3\2\2\2bc\3\2\2\2cd\7\n"+ - "\2\2d\7\3\2\2\2ef\7\16\2\2fg\7\t\2\2gh\5\36\20\2hi\7\n\2\2im\5\n\6\2j"+ - "k\7\20\2\2kn\5\n\6\2ln\6\5\2\2mj\3\2\2\2ml\3\2\2\2n\u00bc\3\2\2\2op\7"+ - "\21\2\2pq\7\t\2\2qr\5\36\20\2ru\7\n\2\2sv\5\n\6\2tv\5\16\b\2us\3\2\2\2"+ - "ut\3\2\2\2v\u00bc\3\2\2\2wx\7\22\2\2xy\5\f\7\2yz\7\21\2\2z{\7\t\2\2{|"+ - "\5\36\20\2|}\7\n\2\2}~\5\34\17\2~\u00bc\3\2\2\2\177\u0080\7\23\2\2\u0080"+ - "\u0082\7\t\2\2\u0081\u0083\5\20\t\2\u0082\u0081\3\2\2\2\u0082\u0083\3"+ - "\2\2\2\u0083\u0084\3\2\2\2\u0084\u0086\7\r\2\2\u0085\u0087\5\36\20\2\u0086"+ - "\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u008a\7\r"+ - "\2\2\u0089\u008b\5\22\n\2\u008a\u0089\3\2\2\2\u008a\u008b\3\2\2\2\u008b"+ - "\u008c\3\2\2\2\u008c\u008f\7\n\2\2\u008d\u0090\5\n\6\2\u008e\u0090\5\16"+ - "\b\2\u008f\u008d\3\2\2\2\u008f\u008e\3\2\2\2\u0090\u00bc\3\2\2\2\u0091"+ - "\u0092\7\23\2\2\u0092\u0093\7\t\2\2\u0093\u0094\5\26\f\2\u0094\u0095\7"+ - "R\2\2\u0095\u0096\7\65\2\2\u0096\u0097\5\36\20\2\u0097\u0098\7\n\2\2\u0098"+ - "\u0099\5\n\6\2\u0099\u00bc\3\2\2\2\u009a\u009b\7\23\2\2\u009b\u009c\7"+ - "\t\2\2\u009c\u009d\7R\2\2\u009d\u009e\7\17\2\2\u009e\u009f\5\36\20\2\u009f"+ - "\u00a0\7\n\2\2\u00a0\u00a1\5\n\6\2\u00a1\u00bc\3\2\2\2\u00a2\u00a3\5\24"+ - "\13\2\u00a3\u00a4\5\34\17\2\u00a4\u00bc\3\2\2\2\u00a5\u00a6\7\24\2\2\u00a6"+ - "\u00bc\5\34\17\2\u00a7\u00a8\7\25\2\2\u00a8\u00bc\5\34\17\2\u00a9\u00aa"+ - "\7\26\2\2\u00aa\u00ab\5\36\20\2\u00ab\u00ac\5\34\17\2\u00ac\u00bc\3\2"+ - "\2\2\u00ad\u00ae\7\30\2\2\u00ae\u00b0\5\f\7\2\u00af\u00b1\5\32\16\2\u00b0"+ - "\u00af\3\2\2\2\u00b1\u00b2\3\2\2\2\u00b2\u00b0\3\2\2\2\u00b2\u00b3\3\2"+ - "\2\2\u00b3\u00bc\3\2\2\2\u00b4\u00b5\7\32\2\2\u00b5\u00b6\5\36\20\2\u00b6"+ - "\u00b7\5\34\17\2\u00b7\u00bc\3\2\2\2\u00b8\u00b9\5\36\20\2\u00b9\u00ba"+ - 
"\5\34\17\2\u00ba\u00bc\3\2\2\2\u00bbe\3\2\2\2\u00bbo\3\2\2\2\u00bbw\3"+ - "\2\2\2\u00bb\177\3\2\2\2\u00bb\u0091\3\2\2\2\u00bb\u009a\3\2\2\2\u00bb"+ - "\u00a2\3\2\2\2\u00bb\u00a5\3\2\2\2\u00bb\u00a7\3\2\2\2\u00bb\u00a9\3\2"+ - "\2\2\u00bb\u00ad\3\2\2\2\u00bb\u00b4\3\2\2\2\u00bb\u00b8\3\2\2\2\u00bc"+ - "\t\3\2\2\2\u00bd\u00c0\5\f\7\2\u00be\u00c0\5\b\5\2\u00bf\u00bd\3\2\2\2"+ - "\u00bf\u00be\3\2\2\2\u00c0\13\3\2\2\2\u00c1\u00c5\7\5\2\2\u00c2\u00c4"+ - "\5\b\5\2\u00c3\u00c2\3\2\2\2\u00c4\u00c7\3\2\2\2\u00c5\u00c3\3\2\2\2\u00c5"+ - "\u00c6\3\2\2\2\u00c6\u00c8\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c8\u00c9\7\6"+ - "\2\2\u00c9\r\3\2\2\2\u00ca\u00cb\7\r\2\2\u00cb\17\3\2\2\2\u00cc\u00cf"+ - "\5\24\13\2\u00cd\u00cf\5\36\20\2\u00ce\u00cc\3\2\2\2\u00ce\u00cd\3\2\2"+ - "\2\u00cf\21\3\2\2\2\u00d0\u00d1\5\36\20\2\u00d1\23\3\2\2\2\u00d2\u00d3"+ - "\5\26\f\2\u00d3\u00d8\5\30\r\2\u00d4\u00d5\7\f\2\2\u00d5\u00d7\5\30\r"+ - "\2\u00d6\u00d4\3\2\2\2\u00d7\u00da\3\2\2\2\u00d8\u00d6\3\2\2\2\u00d8\u00d9"+ - "\3\2\2\2\u00d9\25\3\2\2\2\u00da\u00d8\3\2\2\2\u00db\u00e0\7Q\2\2\u00dc"+ - "\u00dd\7\7\2\2\u00dd\u00df\7\b\2\2\u00de\u00dc\3\2\2\2\u00df\u00e2\3\2"+ - "\2\2\u00e0\u00de\3\2\2\2\u00e0\u00e1\3\2\2\2\u00e1\27\3\2\2\2\u00e2\u00e0"+ - "\3\2\2\2\u00e3\u00e6\7R\2\2\u00e4\u00e5\7<\2\2\u00e5\u00e7\5\36\20\2\u00e6"+ - "\u00e4\3\2\2\2\u00e6\u00e7\3\2\2\2\u00e7\31\3\2\2\2\u00e8\u00e9\7\31\2"+ - "\2\u00e9\u00ea\7\t\2\2\u00ea\u00eb\7Q\2\2\u00eb\u00ec\7R\2\2\u00ec\u00ed"+ - "\7\n\2\2\u00ed\u00ee\5\f\7\2\u00ee\33\3\2\2\2\u00ef\u00f0\t\2\2\2\u00f0"+ - "\35\3\2\2\2\u00f1\u00f2\b\20\1\2\u00f2\u00f3\5 \21\2\u00f3\u0123\3\2\2"+ - "\2\u00f4\u00f5\f\20\2\2\u00f5\u00f6\t\3\2\2\u00f6\u0122\5\36\20\21\u00f7"+ - "\u00f8\f\17\2\2\u00f8\u00f9\t\4\2\2\u00f9\u0122\5\36\20\20\u00fa\u00fb"+ - "\f\16\2\2\u00fb\u00fc\t\5\2\2\u00fc\u0122\5\36\20\17\u00fd\u00fe\f\r\2"+ - "\2\u00fe\u00ff\t\6\2\2\u00ff\u0122\5\36\20\16\u0100\u0101\f\f\2\2\u0101"+ - "\u0102\t\7\2\2\u0102\u0122\5\36\20\r\u0103\u0104\f\n\2\2\u0104\u0105\t"+ - "\b\2\2\u0105\u0122\5\36\20\13\u0106\u0107\f\t\2\2\u0107\u0108\7/\2\2\u0108"+ - "\u0122\5\36\20\n\u0109\u010a\f\b\2\2\u010a\u010b\7\60\2\2\u010b\u0122"+ - "\5\36\20\t\u010c\u010d\f\7\2\2\u010d\u010e\7\61\2\2\u010e\u0122\5\36\20"+ - "\b\u010f\u0110\f\6\2\2\u0110\u0111\7\62\2\2\u0111\u0122\5\36\20\7\u0112"+ - "\u0113\f\5\2\2\u0113\u0114\7\63\2\2\u0114\u0122\5\36\20\6\u0115\u0116"+ - "\f\4\2\2\u0116\u0117\7\64\2\2\u0117\u0118\5\36\20\2\u0118\u0119\7\65\2"+ - "\2\u0119\u011a\5\36\20\4\u011a\u0122\3\2\2\2\u011b\u011c\f\3\2\2\u011c"+ - "\u011d\t\t\2\2\u011d\u0122\5\36\20\3\u011e\u011f\f\13\2\2\u011f\u0120"+ - "\7\34\2\2\u0120\u0122\5\26\f\2\u0121\u00f4\3\2\2\2\u0121\u00f7\3\2\2\2"+ - "\u0121\u00fa\3\2\2\2\u0121\u00fd\3\2\2\2\u0121\u0100\3\2\2\2\u0121\u0103"+ - "\3\2\2\2\u0121\u0106\3\2\2\2\u0121\u0109\3\2\2\2\u0121\u010c\3\2\2\2\u0121"+ - "\u010f\3\2\2\2\u0121\u0112\3\2\2\2\u0121\u0115\3\2\2\2\u0121\u011b\3\2"+ - "\2\2\u0121\u011e\3\2\2\2\u0122\u0125\3\2\2\2\u0123\u0121\3\2\2\2\u0123"+ - "\u0124\3\2\2\2\u0124\37\3\2\2\2\u0125\u0123\3\2\2\2\u0126\u0127\t\n\2"+ - "\2\u0127\u0134\5\"\22\2\u0128\u0129\5\"\22\2\u0129\u012a\t\n\2\2\u012a"+ - "\u0134\3\2\2\2\u012b\u0134\5\"\22\2\u012c\u012d\t\13\2\2\u012d\u0134\5"+ - " \21\2\u012e\u012f\7\t\2\2\u012f\u0130\5\26\f\2\u0130\u0131\7\n\2\2\u0131"+ - "\u0132\5 \21\2\u0132\u0134\3\2\2\2\u0133\u0126\3\2\2\2\u0133\u0128\3\2"+ - "\2\2\u0133\u012b\3\2\2\2\u0133\u012c\3\2\2\2\u0133\u012e\3\2\2\2\u0134"+ - "!\3\2\2\2\u0135\u0139\5$\23\2\u0136\u0138\5&\24\2\u0137\u0136\3\2\2\2"+ - 
"\u0138\u013b\3\2\2\2\u0139\u0137\3\2\2\2\u0139\u013a\3\2\2\2\u013a\u0146"+ - "\3\2\2\2\u013b\u0139\3\2\2\2\u013c\u013d\5\26\f\2\u013d\u0141\5(\25\2"+ - "\u013e\u0140\5&\24\2\u013f\u013e\3\2\2\2\u0140\u0143\3\2\2\2\u0141\u013f"+ - "\3\2\2\2\u0141\u0142\3\2\2\2\u0142\u0146\3\2\2\2\u0143\u0141\3\2\2\2\u0144"+ - "\u0146\5\60\31\2\u0145\u0135\3\2\2\2\u0145\u013c\3\2\2\2\u0145\u0144\3"+ - "\2\2\2\u0146#\3\2\2\2\u0147\u0148\7\t\2\2\u0148\u0149\5\36\20\2\u0149"+ - "\u014a\7\n\2\2\u014a\u015a\3\2\2\2\u014b\u015a\t\f\2\2\u014c\u015a\7N"+ - "\2\2\u014d\u015a\7O\2\2\u014e\u015a\7P\2\2\u014f\u015a\7L\2\2\u0150\u015a"+ - "\7M\2\2\u0151\u015a\5\62\32\2\u0152\u015a\5\64\33\2\u0153\u015a\7R\2\2"+ - "\u0154\u0155\7R\2\2\u0155\u015a\58\35\2\u0156\u0157\7\27\2\2\u0157\u0158"+ - "\7Q\2\2\u0158\u015a\58\35\2\u0159\u0147\3\2\2\2\u0159\u014b\3\2\2\2\u0159"+ - "\u014c\3\2\2\2\u0159\u014d\3\2\2\2\u0159\u014e\3\2\2\2\u0159\u014f\3\2"+ - "\2\2\u0159\u0150\3\2\2\2\u0159\u0151\3\2\2\2\u0159\u0152\3\2\2\2\u0159"+ - "\u0153\3\2\2\2\u0159\u0154\3\2\2\2\u0159\u0156\3\2\2\2\u015a%\3\2\2\2"+ - "\u015b\u015f\5*\26\2\u015c\u015f\5,\27\2\u015d\u015f\5.\30\2\u015e\u015b"+ - "\3\2\2\2\u015e\u015c\3\2\2\2\u015e\u015d\3\2\2\2\u015f\'\3\2\2\2\u0160"+ - "\u0163\5*\26\2\u0161\u0163\5,\27\2\u0162\u0160\3\2\2\2\u0162\u0161\3\2"+ - "\2\2\u0163)\3\2\2\2\u0164\u0165\7\13\2\2\u0165\u0166\7T\2\2\u0166\u0167"+ - "\58\35\2\u0167+\3\2\2\2\u0168\u0169\7\13\2\2\u0169\u016a\t\r\2\2\u016a"+ - "-\3\2\2\2\u016b\u016c\7\7\2\2\u016c\u016d\5\36\20\2\u016d\u016e\7\b\2"+ - "\2\u016e/\3\2\2\2\u016f\u0170\7\27\2\2\u0170\u0175\7Q\2\2\u0171\u0172"+ - "\7\7\2\2\u0172\u0173\5\36\20\2\u0173\u0174\7\b\2\2\u0174\u0176\3\2\2\2"+ - "\u0175\u0171\3\2\2\2\u0176\u0177\3\2\2\2\u0177\u0175\3\2\2\2\u0177\u0178"+ - "\3\2\2\2\u0178\u0180\3\2\2\2\u0179\u017d\5(\25\2\u017a\u017c\5&\24\2\u017b"+ - "\u017a\3\2\2\2\u017c\u017f\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2"+ - "\2\2\u017e\u0181\3\2\2\2\u017f\u017d\3\2\2\2\u0180\u0179\3\2\2\2\u0180"+ - "\u0181\3\2\2\2\u0181\u019c\3\2\2\2\u0182\u0183\7\27\2\2\u0183\u0184\7"+ - "Q\2\2\u0184\u0185\7\7\2\2\u0185\u0186\7\b\2\2\u0186\u018f\7\5\2\2\u0187"+ - "\u018c\5\36\20\2\u0188\u0189\7\f\2\2\u0189\u018b\5\36\20\2\u018a\u0188"+ - "\3\2\2\2\u018b\u018e\3\2\2\2\u018c\u018a\3\2\2\2\u018c\u018d\3\2\2\2\u018d"+ - "\u0190\3\2\2\2\u018e\u018c\3\2\2\2\u018f\u0187\3\2\2\2\u018f\u0190\3\2"+ - "\2\2\u0190\u0192\3\2\2\2\u0191\u0193\7\r\2\2\u0192\u0191\3\2\2\2\u0192"+ - "\u0193\3\2\2\2\u0193\u0194\3\2\2\2\u0194\u0198\7\6\2\2\u0195\u0197\5&"+ - "\24\2\u0196\u0195\3\2\2\2\u0197\u019a\3\2\2\2\u0198\u0196\3\2\2\2\u0198"+ - "\u0199\3\2\2\2\u0199\u019c\3\2\2\2\u019a\u0198\3\2\2\2\u019b\u016f\3\2"+ - "\2\2\u019b\u0182\3\2\2\2\u019c\61\3\2\2\2\u019d\u019e\7\7\2\2\u019e\u01a3"+ - "\5\36\20\2\u019f\u01a0\7\f\2\2\u01a0\u01a2\5\36\20\2\u01a1\u019f\3\2\2"+ - "\2\u01a2\u01a5\3\2\2\2\u01a3\u01a1\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01a6"+ - "\3\2\2\2\u01a5\u01a3\3\2\2\2\u01a6\u01a7\7\b\2\2\u01a7\u01ab\3\2\2\2\u01a8"+ - "\u01a9\7\7\2\2\u01a9\u01ab\7\b\2\2\u01aa\u019d\3\2\2\2\u01aa\u01a8\3\2"+ - "\2\2\u01ab\63\3\2\2\2\u01ac\u01ad\7\7\2\2\u01ad\u01b2\5\66\34\2\u01ae"+ - "\u01af\7\f\2\2\u01af\u01b1\5\66\34\2\u01b0\u01ae\3\2\2\2\u01b1\u01b4\3"+ - "\2\2\2\u01b2\u01b0\3\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b5\3\2\2\2\u01b4"+ - "\u01b2\3\2\2\2\u01b5\u01b6\7\b\2\2\u01b6\u01bb\3\2\2\2\u01b7\u01b8\7\7"+ - "\2\2\u01b8\u01b9\7\65\2\2\u01b9\u01bb\7\b\2\2\u01ba\u01ac\3\2\2\2\u01ba"+ - "\u01b7\3\2\2\2\u01bb\65\3\2\2\2\u01bc\u01bd\5\36\20\2\u01bd\u01be\7\65"+ - 
"\2\2\u01be\u01bf\5\36\20\2\u01bf\67\3\2\2\2\u01c0\u01c9\7\t\2\2\u01c1"+ - "\u01c6\5:\36\2\u01c2\u01c3\7\f\2\2\u01c3\u01c5\5:\36\2\u01c4\u01c2\3\2"+ - "\2\2\u01c5\u01c8\3\2\2\2\u01c6\u01c4\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7"+ - "\u01ca\3\2\2\2\u01c8\u01c6\3\2\2\2\u01c9\u01c1\3\2\2\2\u01c9\u01ca\3\2"+ - "\2\2\u01ca\u01cb\3\2\2\2\u01cb\u01cc\7\n\2\2\u01cc9\3\2\2\2\u01cd\u01d1"+ - "\5\36\20\2\u01ce\u01d1\5<\37\2\u01cf\u01d1\5@!\2\u01d0\u01cd\3\2\2\2\u01d0"+ - "\u01ce\3\2\2\2\u01d0\u01cf\3\2\2\2\u01d1;\3\2\2\2\u01d2\u01e0\5> \2\u01d3"+ - "\u01dc\7\t\2\2\u01d4\u01d9\5> \2\u01d5\u01d6\7\f\2\2\u01d6\u01d8\5> \2"+ - "\u01d7\u01d5\3\2\2\2\u01d8\u01db\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da"+ - "\3\2\2\2\u01da\u01dd\3\2\2\2\u01db\u01d9\3\2\2\2\u01dc\u01d4\3\2\2\2\u01dc"+ - "\u01dd\3\2\2\2\u01dd\u01de\3\2\2\2\u01de\u01e0\7\n\2\2\u01df\u01d2\3\2"+ - "\2\2\u01df\u01d3\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e4\7\67\2\2\u01e2"+ - "\u01e5\5\f\7\2\u01e3\u01e5\5\36\20\2\u01e4\u01e2\3\2\2\2\u01e4\u01e3\3"+ - "\2\2\2\u01e5=\3\2\2\2\u01e6\u01e8\5\26\f\2\u01e7\u01e6\3\2\2\2\u01e7\u01e8"+ - "\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01ea\7R\2\2\u01ea?\3\2\2\2\u01eb\u01ec"+ - "\7Q\2\2\u01ec\u01ed\7\66\2\2\u01ed\u01f9\7R\2\2\u01ee\u01ef\5\26\f\2\u01ef"+ - "\u01f0\7\66\2\2\u01f0\u01f1\7\27\2\2\u01f1\u01f9\3\2\2\2\u01f2\u01f3\7"+ - "R\2\2\u01f3\u01f4\7\66\2\2\u01f4\u01f9\7R\2\2\u01f5\u01f6\7\33\2\2\u01f6"+ - "\u01f7\7\66\2\2\u01f7\u01f9\7R\2\2\u01f8\u01eb\3\2\2\2\u01f8\u01ee\3\2"+ - "\2\2\u01f8\u01f2\3\2\2\2\u01f8\u01f5\3\2\2\2\u01f9A\3\2\2\2\62EK^amu\u0082"+ - "\u0086\u008a\u008f\u00b2\u00bb\u00bf\u00c5\u00ce\u00d8\u00e0\u00e6\u0121"+ - "\u0123\u0133\u0139\u0141\u0145\u0159\u015e\u0162\u0177\u017d\u0180\u018c"+ - "\u018f\u0192\u0198\u019b\u01a3\u01aa\u01b2\u01ba\u01c6\u01c9\u01d0\u01d9"+ - "\u01dc\u01df\u01e4\u01e7\u01f8"; + "\25\5\25\u0163\n\25\3\26\5\26\u0166\n\26\3\26\3\26\3\26\3\26\3\27\5\27"+ + "\u016d\n\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31"+ + "\3\31\6\31\u017c\n\31\r\31\16\31\u017d\3\31\3\31\7\31\u0182\n\31\f\31"+ + "\16\31\u0185\13\31\5\31\u0187\n\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31"+ + "\3\31\7\31\u0191\n\31\f\31\16\31\u0194\13\31\5\31\u0196\n\31\3\31\5\31"+ + "\u0199\n\31\3\31\3\31\7\31\u019d\n\31\f\31\16\31\u01a0\13\31\5\31\u01a2"+ + "\n\31\3\32\3\32\3\32\3\32\7\32\u01a8\n\32\f\32\16\32\u01ab\13\32\3\32"+ + "\3\32\3\32\3\32\5\32\u01b1\n\32\3\33\3\33\3\33\3\33\7\33\u01b7\n\33\f"+ + "\33\16\33\u01ba\13\33\3\33\3\33\3\33\3\33\3\33\5\33\u01c1\n\33\3\34\3"+ + "\34\3\34\3\34\3\35\3\35\3\35\3\35\7\35\u01cb\n\35\f\35\16\35\u01ce\13"+ + "\35\5\35\u01d0\n\35\3\35\3\35\3\36\3\36\3\36\5\36\u01d7\n\36\3\37\3\37"+ + "\3\37\3\37\3\37\7\37\u01de\n\37\f\37\16\37\u01e1\13\37\5\37\u01e3\n\37"+ + "\3\37\5\37\u01e6\n\37\3\37\3\37\3\37\5\37\u01eb\n\37\3 \5 \u01ee\n \3"+ + " \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\5!\u01ff\n!\3!\2\3\36\"\2"+ + "\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@\2\16\3"+ + "\3\r\r\3\2\37!\3\2\"#\3\289\3\2$&\3\2\'*\3\2+.\3\2\u01ed"+ + "\3\2\2\2@\u01fe\3\2\2\2BD\5\4\3\2CB\3\2\2\2DG\3\2\2\2EC\3\2\2\2EF\3\2"+ + "\2\2FK\3\2\2\2GE\3\2\2\2HJ\5\b\5\2IH\3\2\2\2JM\3\2\2\2KI\3\2\2\2KL\3\2"+ + "\2\2LN\3\2\2\2MK\3\2\2\2NO\7\2\2\3O\3\3\2\2\2PQ\5\26\f\2QR\7R\2\2RS\5"+ + "\6\4\2ST\5\f\7\2T\5\3\2\2\2Ua\7\t\2\2VW\5\26\f\2W^\7R\2\2XY\7\f\2\2YZ"+ + "\5\26\f\2Z[\7R\2\2[]\3\2\2\2\\X\3\2\2\2]`\3\2\2\2^\\\3\2\2\2^_\3\2\2\2"+ + "_b\3\2\2\2`^\3\2\2\2aV\3\2\2\2ab\3\2\2\2bc\3\2\2\2cd\7\n\2\2d\7\3\2\2"+ + "\2ef\7\16\2\2fg\7\t\2\2gh\5\36\20\2hi\7\n\2\2im\5\n\6\2jk\7\20\2\2kn\5"+ + 
"\n\6\2ln\6\5\2\2mj\3\2\2\2ml\3\2\2\2n\u00bc\3\2\2\2op\7\21\2\2pq\7\t\2"+ + "\2qr\5\36\20\2ru\7\n\2\2sv\5\n\6\2tv\5\16\b\2us\3\2\2\2ut\3\2\2\2v\u00bc"+ + "\3\2\2\2wx\7\22\2\2xy\5\f\7\2yz\7\21\2\2z{\7\t\2\2{|\5\36\20\2|}\7\n\2"+ + "\2}~\5\34\17\2~\u00bc\3\2\2\2\177\u0080\7\23\2\2\u0080\u0082\7\t\2\2\u0081"+ + "\u0083\5\20\t\2\u0082\u0081\3\2\2\2\u0082\u0083\3\2\2\2\u0083\u0084\3"+ + "\2\2\2\u0084\u0086\7\r\2\2\u0085\u0087\5\36\20\2\u0086\u0085\3\2\2\2\u0086"+ + "\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u008a\7\r\2\2\u0089\u008b\5\22"+ + "\n\2\u008a\u0089\3\2\2\2\u008a\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c"+ + "\u008f\7\n\2\2\u008d\u0090\5\n\6\2\u008e\u0090\5\16\b\2\u008f\u008d\3"+ + "\2\2\2\u008f\u008e\3\2\2\2\u0090\u00bc\3\2\2\2\u0091\u0092\7\23\2\2\u0092"+ + "\u0093\7\t\2\2\u0093\u0094\5\26\f\2\u0094\u0095\7R\2\2\u0095\u0096\7\65"+ + "\2\2\u0096\u0097\5\36\20\2\u0097\u0098\7\n\2\2\u0098\u0099\5\n\6\2\u0099"+ + "\u00bc\3\2\2\2\u009a\u009b\7\23\2\2\u009b\u009c\7\t\2\2\u009c\u009d\7"+ + "R\2\2\u009d\u009e\7\17\2\2\u009e\u009f\5\36\20\2\u009f\u00a0\7\n\2\2\u00a0"+ + "\u00a1\5\n\6\2\u00a1\u00bc\3\2\2\2\u00a2\u00a3\5\24\13\2\u00a3\u00a4\5"+ + "\34\17\2\u00a4\u00bc\3\2\2\2\u00a5\u00a6\7\24\2\2\u00a6\u00bc\5\34\17"+ + "\2\u00a7\u00a8\7\25\2\2\u00a8\u00bc\5\34\17\2\u00a9\u00aa\7\26\2\2\u00aa"+ + "\u00ab\5\36\20\2\u00ab\u00ac\5\34\17\2\u00ac\u00bc\3\2\2\2\u00ad\u00ae"+ + "\7\30\2\2\u00ae\u00b0\5\f\7\2\u00af\u00b1\5\32\16\2\u00b0\u00af\3\2\2"+ + "\2\u00b1\u00b2\3\2\2\2\u00b2\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00bc"+ + "\3\2\2\2\u00b4\u00b5\7\32\2\2\u00b5\u00b6\5\36\20\2\u00b6\u00b7\5\34\17"+ + "\2\u00b7\u00bc\3\2\2\2\u00b8\u00b9\5\36\20\2\u00b9\u00ba\5\34\17\2\u00ba"+ + "\u00bc\3\2\2\2\u00bbe\3\2\2\2\u00bbo\3\2\2\2\u00bbw\3\2\2\2\u00bb\177"+ + "\3\2\2\2\u00bb\u0091\3\2\2\2\u00bb\u009a\3\2\2\2\u00bb\u00a2\3\2\2\2\u00bb"+ + "\u00a5\3\2\2\2\u00bb\u00a7\3\2\2\2\u00bb\u00a9\3\2\2\2\u00bb\u00ad\3\2"+ + "\2\2\u00bb\u00b4\3\2\2\2\u00bb\u00b8\3\2\2\2\u00bc\t\3\2\2\2\u00bd\u00c0"+ + "\5\f\7\2\u00be\u00c0\5\b\5\2\u00bf\u00bd\3\2\2\2\u00bf\u00be\3\2\2\2\u00c0"+ + "\13\3\2\2\2\u00c1\u00c5\7\5\2\2\u00c2\u00c4\5\b\5\2\u00c3\u00c2\3\2\2"+ + "\2\u00c4\u00c7\3\2\2\2\u00c5\u00c3\3\2\2\2\u00c5\u00c6\3\2\2\2\u00c6\u00c8"+ + "\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c8\u00c9\7\6\2\2\u00c9\r\3\2\2\2\u00ca"+ + "\u00cb\7\r\2\2\u00cb\17\3\2\2\2\u00cc\u00cf\5\24\13\2\u00cd\u00cf\5\36"+ + "\20\2\u00ce\u00cc\3\2\2\2\u00ce\u00cd\3\2\2\2\u00cf\21\3\2\2\2\u00d0\u00d1"+ + "\5\36\20\2\u00d1\23\3\2\2\2\u00d2\u00d3\5\26\f\2\u00d3\u00d8\5\30\r\2"+ + "\u00d4\u00d5\7\f\2\2\u00d5\u00d7\5\30\r\2\u00d6\u00d4\3\2\2\2\u00d7\u00da"+ + "\3\2\2\2\u00d8\u00d6\3\2\2\2\u00d8\u00d9\3\2\2\2\u00d9\25\3\2\2\2\u00da"+ + "\u00d8\3\2\2\2\u00db\u00e0\7Q\2\2\u00dc\u00dd\7\7\2\2\u00dd\u00df\7\b"+ + "\2\2\u00de\u00dc\3\2\2\2\u00df\u00e2\3\2\2\2\u00e0\u00de\3\2\2\2\u00e0"+ + "\u00e1\3\2\2\2\u00e1\27\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e3\u00e6\7R\2\2"+ + "\u00e4\u00e5\7<\2\2\u00e5\u00e7\5\36\20\2\u00e6\u00e4\3\2\2\2\u00e6\u00e7"+ + "\3\2\2\2\u00e7\31\3\2\2\2\u00e8\u00e9\7\31\2\2\u00e9\u00ea\7\t\2\2\u00ea"+ + "\u00eb\7Q\2\2\u00eb\u00ec\7R\2\2\u00ec\u00ed\7\n\2\2\u00ed\u00ee\5\f\7"+ + "\2\u00ee\33\3\2\2\2\u00ef\u00f0\t\2\2\2\u00f0\35\3\2\2\2\u00f1\u00f2\b"+ + "\20\1\2\u00f2\u00f3\5 \21\2\u00f3\u0123\3\2\2\2\u00f4\u00f5\f\20\2\2\u00f5"+ + "\u00f6\t\3\2\2\u00f6\u0122\5\36\20\21\u00f7\u00f8\f\17\2\2\u00f8\u00f9"+ + "\t\4\2\2\u00f9\u0122\5\36\20\20\u00fa\u00fb\f\16\2\2\u00fb\u00fc\t\5\2"+ + "\2\u00fc\u0122\5\36\20\17\u00fd\u00fe\f\r\2\2\u00fe\u00ff\t\6\2\2\u00ff"+ + 
"\u0122\5\36\20\16\u0100\u0101\f\f\2\2\u0101\u0102\t\7\2\2\u0102\u0122"+ + "\5\36\20\r\u0103\u0104\f\n\2\2\u0104\u0105\t\b\2\2\u0105\u0122\5\36\20"+ + "\13\u0106\u0107\f\t\2\2\u0107\u0108\7/\2\2\u0108\u0122\5\36\20\n\u0109"+ + "\u010a\f\b\2\2\u010a\u010b\7\60\2\2\u010b\u0122\5\36\20\t\u010c\u010d"+ + "\f\7\2\2\u010d\u010e\7\61\2\2\u010e\u0122\5\36\20\b\u010f\u0110\f\6\2"+ + "\2\u0110\u0111\7\62\2\2\u0111\u0122\5\36\20\7\u0112\u0113\f\5\2\2\u0113"+ + "\u0114\7\63\2\2\u0114\u0122\5\36\20\6\u0115\u0116\f\4\2\2\u0116\u0117"+ + "\7\64\2\2\u0117\u0118\5\36\20\2\u0118\u0119\7\65\2\2\u0119\u011a\5\36"+ + "\20\4\u011a\u0122\3\2\2\2\u011b\u011c\f\3\2\2\u011c\u011d\t\t\2\2\u011d"+ + "\u0122\5\36\20\3\u011e\u011f\f\13\2\2\u011f\u0120\7\34\2\2\u0120\u0122"+ + "\5\26\f\2\u0121\u00f4\3\2\2\2\u0121\u00f7\3\2\2\2\u0121\u00fa\3\2\2\2"+ + "\u0121\u00fd\3\2\2\2\u0121\u0100\3\2\2\2\u0121\u0103\3\2\2\2\u0121\u0106"+ + "\3\2\2\2\u0121\u0109\3\2\2\2\u0121\u010c\3\2\2\2\u0121\u010f\3\2\2\2\u0121"+ + "\u0112\3\2\2\2\u0121\u0115\3\2\2\2\u0121\u011b\3\2\2\2\u0121\u011e\3\2"+ + "\2\2\u0122\u0125\3\2\2\2\u0123\u0121\3\2\2\2\u0123\u0124\3\2\2\2\u0124"+ + "\37\3\2\2\2\u0125\u0123\3\2\2\2\u0126\u0127\t\n\2\2\u0127\u0134\5\"\22"+ + "\2\u0128\u0129\5\"\22\2\u0129\u012a\t\n\2\2\u012a\u0134\3\2\2\2\u012b"+ + "\u0134\5\"\22\2\u012c\u012d\t\13\2\2\u012d\u0134\5 \21\2\u012e\u012f\7"+ + "\t\2\2\u012f\u0130\5\26\f\2\u0130\u0131\7\n\2\2\u0131\u0132\5 \21\2\u0132"+ + "\u0134\3\2\2\2\u0133\u0126\3\2\2\2\u0133\u0128\3\2\2\2\u0133\u012b\3\2"+ + "\2\2\u0133\u012c\3\2\2\2\u0133\u012e\3\2\2\2\u0134!\3\2\2\2\u0135\u0139"+ + "\5$\23\2\u0136\u0138\5&\24\2\u0137\u0136\3\2\2\2\u0138\u013b\3\2\2\2\u0139"+ + "\u0137\3\2\2\2\u0139\u013a\3\2\2\2\u013a\u0146\3\2\2\2\u013b\u0139\3\2"+ + "\2\2\u013c\u013d\5\26\f\2\u013d\u0141\5(\25\2\u013e\u0140\5&\24\2\u013f"+ + "\u013e\3\2\2\2\u0140\u0143\3\2\2\2\u0141\u013f\3\2\2\2\u0141\u0142\3\2"+ + "\2\2\u0142\u0146\3\2\2\2\u0143\u0141\3\2\2\2\u0144\u0146\5\60\31\2\u0145"+ + "\u0135\3\2\2\2\u0145\u013c\3\2\2\2\u0145\u0144\3\2\2\2\u0146#\3\2\2\2"+ + "\u0147\u0148\7\t\2\2\u0148\u0149\5\36\20\2\u0149\u014a\7\n\2\2\u014a\u015a"+ + "\3\2\2\2\u014b\u015a\t\f\2\2\u014c\u015a\7N\2\2\u014d\u015a\7O\2\2\u014e"+ + "\u015a\7P\2\2\u014f\u015a\7L\2\2\u0150\u015a\7M\2\2\u0151\u015a\5\62\32"+ + "\2\u0152\u015a\5\64\33\2\u0153\u015a\7R\2\2\u0154\u0155\7R\2\2\u0155\u015a"+ + "\58\35\2\u0156\u0157\7\27\2\2\u0157\u0158\7Q\2\2\u0158\u015a\58\35\2\u0159"+ + "\u0147\3\2\2\2\u0159\u014b\3\2\2\2\u0159\u014c\3\2\2\2\u0159\u014d\3\2"+ + "\2\2\u0159\u014e\3\2\2\2\u0159\u014f\3\2\2\2\u0159\u0150\3\2\2\2\u0159"+ + "\u0151\3\2\2\2\u0159\u0152\3\2\2\2\u0159\u0153\3\2\2\2\u0159\u0154\3\2"+ + "\2\2\u0159\u0156\3\2\2\2\u015a%\3\2\2\2\u015b\u015f\5*\26\2\u015c\u015f"+ + "\5,\27\2\u015d\u015f\5.\30\2\u015e\u015b\3\2\2\2\u015e\u015c\3\2\2\2\u015e"+ + "\u015d\3\2\2\2\u015f\'\3\2\2\2\u0160\u0163\5*\26\2\u0161\u0163\5,\27\2"+ + "\u0162\u0160\3\2\2\2\u0162\u0161\3\2\2\2\u0163)\3\2\2\2\u0164\u0166\7"+ + "\64\2\2\u0165\u0164\3\2\2\2\u0165\u0166\3\2\2\2\u0166\u0167\3\2\2\2\u0167"+ + "\u0168\7\13\2\2\u0168\u0169\7T\2\2\u0169\u016a\58\35\2\u016a+\3\2\2\2"+ + "\u016b\u016d\7\64\2\2\u016c\u016b\3\2\2\2\u016c\u016d\3\2\2\2\u016d\u016e"+ + "\3\2\2\2\u016e\u016f\7\13\2\2\u016f\u0170\t\r\2\2\u0170-\3\2\2\2\u0171"+ + "\u0172\7\7\2\2\u0172\u0173\5\36\20\2\u0173\u0174\7\b\2\2\u0174/\3\2\2"+ + "\2\u0175\u0176\7\27\2\2\u0176\u017b\7Q\2\2\u0177\u0178\7\7\2\2\u0178\u0179"+ + "\5\36\20\2\u0179\u017a\7\b\2\2\u017a\u017c\3\2\2\2\u017b\u0177\3\2\2\2"+ + 
"\u017c\u017d\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0186"+ + "\3\2\2\2\u017f\u0183\5(\25\2\u0180\u0182\5&\24\2\u0181\u0180\3\2\2\2\u0182"+ + "\u0185\3\2\2\2\u0183\u0181\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u0187\3\2"+ + "\2\2\u0185\u0183\3\2\2\2\u0186\u017f\3\2\2\2\u0186\u0187\3\2\2\2\u0187"+ + "\u01a2\3\2\2\2\u0188\u0189\7\27\2\2\u0189\u018a\7Q\2\2\u018a\u018b\7\7"+ + "\2\2\u018b\u018c\7\b\2\2\u018c\u0195\7\5\2\2\u018d\u0192\5\36\20\2\u018e"+ + "\u018f\7\f\2\2\u018f\u0191\5\36\20\2\u0190\u018e\3\2\2\2\u0191\u0194\3"+ + "\2\2\2\u0192\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u0196\3\2\2\2\u0194"+ + "\u0192\3\2\2\2\u0195\u018d\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0198\3\2"+ + "\2\2\u0197\u0199\7\r\2\2\u0198\u0197\3\2\2\2\u0198\u0199\3\2\2\2\u0199"+ + "\u019a\3\2\2\2\u019a\u019e\7\6\2\2\u019b\u019d\5&\24\2\u019c\u019b\3\2"+ + "\2\2\u019d\u01a0\3\2\2\2\u019e\u019c\3\2\2\2\u019e\u019f\3\2\2\2\u019f"+ + "\u01a2\3\2\2\2\u01a0\u019e\3\2\2\2\u01a1\u0175\3\2\2\2\u01a1\u0188\3\2"+ + "\2\2\u01a2\61\3\2\2\2\u01a3\u01a4\7\7\2\2\u01a4\u01a9\5\36\20\2\u01a5"+ + "\u01a6\7\f\2\2\u01a6\u01a8\5\36\20\2\u01a7\u01a5\3\2\2\2\u01a8\u01ab\3"+ + "\2\2\2\u01a9\u01a7\3\2\2\2\u01a9\u01aa\3\2\2\2\u01aa\u01ac\3\2\2\2\u01ab"+ + "\u01a9\3\2\2\2\u01ac\u01ad\7\b\2\2\u01ad\u01b1\3\2\2\2\u01ae\u01af\7\7"+ + "\2\2\u01af\u01b1\7\b\2\2\u01b0\u01a3\3\2\2\2\u01b0\u01ae\3\2\2\2\u01b1"+ + "\63\3\2\2\2\u01b2\u01b3\7\7\2\2\u01b3\u01b8\5\66\34\2\u01b4\u01b5\7\f"+ + "\2\2\u01b5\u01b7\5\66\34\2\u01b6\u01b4\3\2\2\2\u01b7\u01ba\3\2\2\2\u01b8"+ + "\u01b6\3\2\2\2\u01b8\u01b9\3\2\2\2\u01b9\u01bb\3\2\2\2\u01ba\u01b8\3\2"+ + "\2\2\u01bb\u01bc\7\b\2\2\u01bc\u01c1\3\2\2\2\u01bd\u01be\7\7\2\2\u01be"+ + "\u01bf\7\65\2\2\u01bf\u01c1\7\b\2\2\u01c0\u01b2\3\2\2\2\u01c0\u01bd\3"+ + "\2\2\2\u01c1\65\3\2\2\2\u01c2\u01c3\5\36\20\2\u01c3\u01c4\7\65\2\2\u01c4"+ + "\u01c5\5\36\20\2\u01c5\67\3\2\2\2\u01c6\u01cf\7\t\2\2\u01c7\u01cc\5:\36"+ + "\2\u01c8\u01c9\7\f\2\2\u01c9\u01cb\5:\36\2\u01ca\u01c8\3\2\2\2\u01cb\u01ce"+ + "\3\2\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01d0\3\2\2\2\u01ce"+ + "\u01cc\3\2\2\2\u01cf\u01c7\3\2\2\2\u01cf\u01d0\3\2\2\2\u01d0\u01d1\3\2"+ + "\2\2\u01d1\u01d2\7\n\2\2\u01d29\3\2\2\2\u01d3\u01d7\5\36\20\2\u01d4\u01d7"+ + "\5<\37\2\u01d5\u01d7\5@!\2\u01d6\u01d3\3\2\2\2\u01d6\u01d4\3\2\2\2\u01d6"+ + "\u01d5\3\2\2\2\u01d7;\3\2\2\2\u01d8\u01e6\5> \2\u01d9\u01e2\7\t\2\2\u01da"+ + "\u01df\5> \2\u01db\u01dc\7\f\2\2\u01dc\u01de\5> \2\u01dd\u01db\3\2\2\2"+ + "\u01de\u01e1\3\2\2\2\u01df\u01dd\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01e3"+ + "\3\2\2\2\u01e1\u01df\3\2\2\2\u01e2\u01da\3\2\2\2\u01e2\u01e3\3\2\2\2\u01e3"+ + "\u01e4\3\2\2\2\u01e4\u01e6\7\n\2\2\u01e5\u01d8\3\2\2\2\u01e5\u01d9\3\2"+ + "\2\2\u01e6\u01e7\3\2\2\2\u01e7\u01ea\7\67\2\2\u01e8\u01eb\5\f\7\2\u01e9"+ + "\u01eb\5\36\20\2\u01ea\u01e8\3\2\2\2\u01ea\u01e9\3\2\2\2\u01eb=\3\2\2"+ + "\2\u01ec\u01ee\5\26\f\2\u01ed\u01ec\3\2\2\2\u01ed\u01ee\3\2\2\2\u01ee"+ + "\u01ef\3\2\2\2\u01ef\u01f0\7R\2\2\u01f0?\3\2\2\2\u01f1\u01f2\7Q\2\2\u01f2"+ + "\u01f3\7\66\2\2\u01f3\u01ff\7R\2\2\u01f4\u01f5\5\26\f\2\u01f5\u01f6\7"+ + "\66\2\2\u01f6\u01f7\7\27\2\2\u01f7\u01ff\3\2\2\2\u01f8\u01f9\7R\2\2\u01f9"+ + "\u01fa\7\66\2\2\u01fa\u01ff\7R\2\2\u01fb\u01fc\7\33\2\2\u01fc\u01fd\7"+ + "\66\2\2\u01fd\u01ff\7R\2\2\u01fe\u01f1\3\2\2\2\u01fe\u01f4\3\2\2\2\u01fe"+ + "\u01f8\3\2\2\2\u01fe\u01fb\3\2\2\2\u01ffA\3\2\2\2\64EK^amu\u0082\u0086"+ + "\u008a\u008f\u00b2\u00bb\u00bf\u00c5\u00ce\u00d8\u00e0\u00e6\u0121\u0123"+ + "\u0133\u0139\u0141\u0145\u0159\u015e\u0162\u0165\u016c\u017d\u0183\u0186"+ + 
"\u0192\u0195\u0198\u019e\u01a1\u01a9\u01b0\u01b8\u01c0\u01cc\u01cf\u01d6"+ + "\u01df\u01e2\u01e5\u01ea\u01ed\u01fe"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index da430f4280a..5659afc75ea 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -898,7 +898,7 @@ public final class Walker extends PainlessParserBaseVisitor { String name = ctx.DOTID().getText(); List arguments = collectArguments(ctx.arguments()); - return new PCallInvoke(location(ctx), prefix, name, arguments); + return new PCallInvoke(location(ctx), prefix, name, ctx.COND() != null, arguments); } @Override @@ -917,7 +917,7 @@ public final class Walker extends PainlessParserBaseVisitor { throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); } - return new PField(location(ctx), prefix, value); + return new PField(location(ctx), prefix, ctx.COND() != null, value); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java index 71b8ccd4da1..3cff6bab08e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java @@ -23,8 +23,11 @@ import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; import java.util.Objects; +import java.util.function.Consumer; /** * The super class for an expression that can store a value in local memory. @@ -100,4 +103,21 @@ abstract class AStoreable extends AExpression { * Called to store a storabable to local memory. */ abstract void store(MethodWriter writer, Globals globals); + + /** + * Writes the opcodes to flip a negative array index (meaning slots from the end of the array) into a 0-based one (meaning slots from + * the start of the array). 
+ */ + static void writeIndexFlip(MethodWriter writer, Consumer<MethodWriter> writeGetLength) { + Label noFlip = new Label(); + // Wherever 'array' appears in the comments below, the receiver could also be a list + // The stack after each instruction: array, unnormalized_index + writer.dup(); // array, unnormalized_index, unnormalized_index + writer.ifZCmp(Opcodes.IFGE, noFlip); // array, unnormalized_index + writer.swap(); // negative_index, array + writer.dupX1(); // array, negative_index, array + writeGetLength.accept(writer); // array, negative_index, length + writer.visitInsn(Opcodes.IADD); // array, normalized_index + writer.mark(noFlip); // array, normalized_index + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java index 1f9fe8bdfcb..54a4aceb734 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java @@ -34,7 +34,7 @@ import org.elasticsearch.painless.MethodWriter; import org.objectweb.asm.Opcodes; /** - * Respresents a conditional expression. + * Represents a conditional expression. */ public final class EConditional extends AExpression { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java index e8cfb1eba2e..9d405a7b00a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java @@ -39,14 +39,16 @@ public final class PCallInvoke extends AExpression { private final String name; + private final boolean nullSafe; private final List arguments; private AExpression sub = null; - public PCallInvoke(Location location, AExpression prefix, String name, List arguments) { + public PCallInvoke(Location location, AExpression prefix, String name, boolean nullSafe, List arguments) { super(location, prefix); this.name = Objects.requireNonNull(name); + this.nullSafe = nullSafe; this.arguments = Objects.requireNonNull(arguments); } @@ -87,6 +89,10 @@ public final class PCallInvoke extends AExpression { "Unknown call [" + name + "] with [" + arguments.size() + "] arguments on type [" + struct.name + "].")); } + if (nullSafe) { + sub = new PSubNullSafeCallInvoke(location, sub); + } + sub.expected = expected; sub.explicit = explicit; sub.analyze(locals); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java index 21a3def3189..ea23d3cdd07 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java @@ -40,13 +40,15 @@ */ public final class PField extends AStoreable { + private final boolean nullSafe; private final String value; private AStoreable sub = null; - public PField(Location location, AExpression prefix, String value) { + public PField(Location location, AExpression prefix, boolean nullSafe, String value) { super(location, prefix); + this.nullSafe = nullSafe; this.value = Objects.requireNonNull(value); } @@ -106,6 +108,10 @@ public final class PField extends AStoreable { throw createError(new
IllegalArgumentException("Unknown field [" + value + "] for type [" + prefix.actual.name + "].")); } + if (nullSafe) { + sub = new PSubNullSafeField(location, sub); + } + sub.write = write; sub.read = read; sub.expected = expected; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java index 45b3ef88cd1..a6fb3cefbb1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java @@ -60,10 +60,8 @@ final class PSubBrace extends AStoreable { @Override void write(MethodWriter writer, Globals globals) { - if (!write) { - setup(writer, globals); - load(writer, globals); - } + setup(writer, globals); + load(writer, globals); } @Override @@ -84,6 +82,7 @@ final class PSubBrace extends AStoreable { @Override void setup(MethodWriter writer, Globals globals) { index.write(writer, globals); + writeIndexFlip(writer, MethodWriter::arrayLength); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java index 2153897a000..2776fffec61 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java @@ -34,7 +34,6 @@ import java.util.Set; * Represents an array load/store or shortcut on a def type. (Internal only.) */ final class PSubDefArray extends AStoreable { - private AExpression index; PSubDefArray(Location location, AExpression index) { @@ -59,13 +58,8 @@ final class PSubDefArray extends AStoreable { @Override void write(MethodWriter writer, Globals globals) { - index.write(writer, globals); - - writer.writeDebugInfo(location); - - org.objectweb.asm.Type methodType = - org.objectweb.asm.Type.getMethodType(actual.type, Definition.DEF_TYPE.type, index.actual.type); - writer.invokeDefCall("arrayLoad", methodType, DefBootstrap.ARRAY_LOAD); + setup(writer, globals); + load(writer, globals); } @Override @@ -85,7 +79,12 @@ final class PSubDefArray extends AStoreable { @Override void setup(MethodWriter writer, Globals globals) { - index.write(writer, globals); + // Current stack: def + writer.dup(); // def, def + index.write(writer, globals); // def, def, unnormalized_index + org.objectweb.asm.Type methodType = org.objectweb.asm.Type.getMethodType( + index.actual.type, Definition.DEF_TYPE.type, index.actual.type); + writer.invokeDefCall("normalizeIndex", methodType, DefBootstrap.INDEX_NORMALIZE); // def, normalized_index } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java index c13f8235821..5b8396f72d3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java @@ -28,6 +28,7 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.WriterConstants; import java.util.Objects; import java.util.Set; @@ -87,15 +88,8 @@ final class PSubListShortcut extends AStoreable 
{ @Override void write(MethodWriter writer, Globals globals) { - index.write(writer, globals); - - writer.writeDebugInfo(location); - - getter.write(writer); - - if (!getter.rtn.clazz.equals(getter.handle.type().returnType())) { - writer.checkCast(getter.rtn.type); - } + setup(writer, globals); + load(writer, globals); } @Override @@ -116,6 +110,9 @@ final class PSubListShortcut extends AStoreable { @Override void setup(MethodWriter writer, Globals globals) { index.write(writer, globals); + writeIndexFlip(writer, w -> { + w.invokeInterface(WriterConstants.COLLECTION_TYPE, WriterConstants.COLLECTION_SIZE); + }); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java new file mode 100644 index 00000000000..51349949dbd --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Globals; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; +import org.objectweb.asm.Label; + +import java.util.Set; + +import static java.util.Objects.requireNonNull; + +/** + * Implements a call whose value is null if the prefix is null rather than throwing an NPE. + */ +public class PSubNullSafeCallInvoke extends AExpression { + /** + * The expression guarded by the null check. Required at construction time and replaced at analysis time. + */ + private AExpression guarded; + + public PSubNullSafeCallInvoke(Location location, AExpression guarded) { + super(location); + this.guarded = requireNonNull(guarded); + } + + @Override + void extractVariables(Set variables) { + guarded.extractVariables(variables); + } + + @Override + void analyze(Locals locals) { + guarded.analyze(locals); + actual = guarded.actual; + if (actual.sort.primitive) { + // Result must be nullable. We emit boxing instructions if needed.
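+            // Illustration: a null-safe call like a?.length() behaves like the Java
+            // expression (a == null ? null : Integer.valueOf(a.length())), which is
+            // why a primitive result is widened to its boxed type here.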
+ actual = Definition.getType(actual.sort.boxed.getSimpleName()); + } + } + + @Override + void write(MethodWriter writer, Globals globals) { + writer.writeDebugInfo(location); + + Label end = new Label(); + writer.dup(); + writer.ifNull(end); + guarded.write(writer, globals); + if (guarded.actual.sort.primitive) { + // Box primitives so they are nullable + writer.box(guarded.actual.type); + } + writer.mark(end); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeField.java new file mode 100644 index 00000000000..32ad6c0cb62 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeField.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Globals; +import org.elasticsearch.painless.Locals; +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; +import org.objectweb.asm.Label; + +import java.util.Set; + +/** + * Implements a field whose value is null if the prefix is null rather than throwing an NPE. + */ +public class PSubNullSafeField extends AStoreable { + private AStoreable guarded; + + public PSubNullSafeField(Location location, AStoreable guarded) { + super(location); + this.guarded = guarded; + } + + @Override + void extractVariables(Set variables) { + guarded.extractVariables(variables); + } + + @Override + void analyze(Locals locals) { + if (write) { + throw createError(new IllegalArgumentException("Can't write to null safe reference")); + } + guarded.read = read; + guarded.analyze(locals); + actual = guarded.actual; + if (actual.sort.primitive) { + // Result must be nullable. We emit boxing instructions if needed.
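+            // Illustration: a null-safe read like a?.x behaves like (a == null ? null : a.x),
+            // boxed the same way as above; writes through ?. are rejected at the top of analyze
+            // because there is no sensible target to write to when the prefix is null.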
+ actual = Definition.getType(actual.sort.boxed.getSimpleName()); + } + } + + + @Override + int accessElementCount() { + return guarded.accessElementCount(); + } + + @Override + boolean isDefOptimized() { + return guarded.isDefOptimized(); + } + + @Override + void updateActual(Type actual) { + guarded.updateActual(actual); + } + + @Override + void write(MethodWriter writer, Globals globals) { + Label end = new Label(); + writer.dup(); + writer.ifNull(end); + guarded.write(writer, globals); + if (guarded.actual.sort.primitive) { + // Box primitives so they are nullable + writer.box(guarded.actual.type); + } + writer.mark(end); + } + + @Override + void setup(MethodWriter writer, Globals globals) { + throw createError(new IllegalArgumentException("Can't write to null safe field")); + } + + @Override + void load(MethodWriter writer, Globals globals) { + throw createError(new IllegalArgumentException("Can't write to null safe field")); + } + + @Override + void store(MethodWriter writer, Globals globals) { + throw createError(new IllegalArgumentException("Can't write to null safe field")); + } +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt index 7374efea04b..286141e2921 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt @@ -82,8 +82,6 @@ class Chronology -> java.time.chrono.Chronology extends Comparable { ChronoLocalDate date(Era,int,int,int) ChronoLocalDate date(int,int,int) ChronoLocalDate dateEpochDay(long) - ChronoLocalDate dateNow() - ChronoLocalDate dateNow(ZoneId) ChronoLocalDate dateYearDay(Era,int,int) ChronoLocalDate dateYearDay(int,int) boolean equals(Object) @@ -171,8 +169,6 @@ class HijrahChronology -> java.time.chrono.HijrahChronology extends AbstractChro HijrahDate date(int,int,int) HijrahDate date(Era,int,int,int) HijrahDate dateEpochDay(long) - HijrahDate dateNow() - HijrahDate dateNow(ZoneId) HijrahDate dateYearDay(int,int) HijrahDate dateYearDay(Era,int,int) HijrahEra eraOf(int) @@ -185,8 +181,6 @@ class HijrahDate -> java.time.chrono.HijrahDate extends ChronoLocalDate,Temporal HijrahEra getEra() HijrahDate minus(TemporalAmount) HijrahDate minus(long,TemporalUnit) - HijrahDate now() - HijrahDate now(ZoneId) HijrahDate of(int,int,int) HijrahDate plus(TemporalAmount) HijrahDate plus(long,TemporalUnit) @@ -201,8 +195,6 @@ class IsoChronology -> java.time.chrono.IsoChronology extends AbstractChronology LocalDate date(int,int,int) LocalDate date(Era,int,int,int) LocalDate dateEpochDay(long) - LocalDate dateNow() - LocalDate dateNow(ZoneId) LocalDate dateYearDay(int,int) LocalDate dateYearDay(Era,int,int) IsoEra eraOf(int) @@ -219,8 +211,6 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract JapaneseDate date(int,int,int) JapaneseDate date(Era,int,int,int) JapaneseDate dateEpochDay(long) - JapaneseDate dateNow() - JapaneseDate dateNow(ZoneId) JapaneseDate dateYearDay(int,int) JapaneseDate dateYearDay(Era,int,int) JapaneseEra eraOf(int) @@ -228,8 +218,6 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract } class JapaneseDate -> java.time.chrono.JapaneseDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { - JapaneseDate now() - JapaneseDate now(ZoneId) JapaneseDate of(int,int,int) JapaneseDate 
from(TemporalAccessor) JapaneseChronology getChronology() @@ -259,8 +247,6 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro MinguoDate date(int,int,int) MinguoDate date(Era,int,int,int) MinguoDate dateEpochDay(long) - MinguoDate dateNow() - MinguoDate dateNow(ZoneId) MinguoDate dateYearDay(int,int) MinguoDate dateYearDay(Era,int,int) MinguoEra eraOf(int) @@ -268,8 +254,6 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro } class MinguoDate -> java.time.chrono.MinguoDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { - MinguoDate now() - MinguoDate now(ZoneId) MinguoDate of(int,int,int) MinguoDate from(TemporalAccessor) MinguoChronology getChronology() @@ -288,8 +272,6 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends ThaiBuddhistDate date(int,int,int) ThaiBuddhistDate date(Era,int,int,int) ThaiBuddhistDate dateEpochDay(long) - ThaiBuddhistDate dateNow() - ThaiBuddhistDate dateNow(ZoneId) ThaiBuddhistDate dateYearDay(int,int) ThaiBuddhistDate dateYearDay(Era,int,int) ThaiBuddhistEra eraOf(int) @@ -297,8 +279,6 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends } class ThaiBuddhistDate -> java.time.chrono.ThaiBuddhistDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { - ThaiBuddhistDate now() - ThaiBuddhistDate now(ZoneId) ThaiBuddhistDate of(int,int,int) ThaiBuddhistDate from(TemporalAccessor) ThaiBuddhistChronology getChronology() diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt index 4481004fdf6..35f19b0abdd 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt @@ -30,13 +30,7 @@ class Clock -> java.time.Clock extends Object { Instant instant() long millis() Clock offset(Clock,Duration) - Clock system(ZoneId) - Clock systemDefaultZone() - Clock systemUTC() Clock tick(Clock,Duration) - Clock tickMinutes(ZoneId) - Clock tickSeconds(ZoneId) - Clock withZone(ZoneId) } class Duration -> java.time.Duration extends Comparable,TemporalAmount,Object { @@ -103,8 +97,6 @@ class Instant -> java.time.Instant extends Comparable,Temporal,TemporalAccessor, Instant minusMillis(long) Instant minusNanos(long) Instant minusSeconds(long) - Instant now() - Instant now(Clock) Instant ofEpochSecond(long) Instant ofEpochSecond(long,long) Instant ofEpochMilli(long) @@ -143,8 +135,6 @@ class LocalDate -> java.time.LocalDate extends ChronoLocalDate,Temporal,Temporal LocalDate minusMonths(long) LocalDate minusWeeks(long) LocalDate minusDays(long) - LocalDate now() - LocalDate now(ZoneId) LocalDate of(int,int,int) LocalDate ofYearDay(int,int) LocalDate ofEpochDay(long) @@ -191,8 +181,6 @@ class LocalDateTime -> java.time.LocalDateTime extends ChronoLocalDateTime,Tempo LocalDateTime minusSeconds(long) LocalDateTime minusWeeks(long) LocalDateTime minusYears(long) - LocalDateTime now() - LocalDateTime now(ZoneId) LocalDateTime of(LocalDate,LocalTime) LocalDateTime of(int,int,int,int,int) LocalDateTime of(int,int,int,int,int,int) @@ -246,8 +234,6 @@ class LocalTime -> java.time.LocalTime extends Temporal,TemporalAccessor,Tempora LocalTime minusMinutes(long) LocalTime minusNanos(long) LocalTime minusSeconds(long) - LocalTime now() - LocalTime now(ZoneId) 
LocalTime of(int,int) LocalTime of(int,int,int) LocalTime of(int,int,int,int) @@ -283,8 +269,6 @@ class MonthDay -> java.time.MonthDay extends TemporalAccessor,TemporalAdjuster,C boolean isAfter(MonthDay) boolean isBefore(MonthDay) boolean isValidYear(int) - MonthDay now() - MonthDay now(ZoneId) MonthDay of(int,int) MonthDay parse(CharSequence) MonthDay parse(CharSequence,DateTimeFormatter) @@ -325,8 +309,6 @@ class OffsetDateTime -> java.time.OffsetDateTime extends Temporal,TemporalAccess OffsetDateTime minusMinutes(long) OffsetDateTime minusSeconds(long) OffsetDateTime minusNanos(long) - OffsetDateTime now() - OffsetDateTime now(ZoneId) OffsetDateTime of(LocalDate,LocalTime,ZoneOffset) OffsetDateTime of(LocalDateTime,ZoneOffset) OffsetDateTime of(int,int,int,int,int,int,int,ZoneOffset) @@ -380,8 +362,6 @@ class OffsetTime -> java.time.OffsetTime extends Temporal,TemporalAccessor,Tempo boolean isAfter(OffsetTime) boolean isBefore(OffsetTime) boolean isEqual(OffsetTime) - OffsetTime now() - OffsetTime now(ZoneId) OffsetTime of(LocalTime,ZoneOffset) OffsetTime of(int,int,int,int,ZoneOffset) OffsetTime ofInstant(Instant,ZoneId) @@ -460,8 +440,6 @@ class Year -> java.time.Year extends Temporal,TemporalAccessor,TemporalAdjuster, Year minus(TemporalAmount) Year minus(long,TemporalUnit) Year minusYears(long) - Year now() - Year now(ZoneId) Year of(int) Year parse(CharSequence) Year parse(CharSequence,DateTimeFormatter) @@ -491,8 +469,6 @@ class YearMonth -> java.time.YearMonth extends Temporal,TemporalAccessor,Tempora YearMonth minus(long,TemporalUnit) YearMonth minusYears(long) YearMonth minusMonths(long) - YearMonth now() - YearMonth now(ZoneId) YearMonth of(int,int) YearMonth parse(CharSequence) YearMonth parse(CharSequence,DateTimeFormatter) @@ -530,8 +506,6 @@ class ZonedDateTime -> java.time.ZonedDateTime extends ChronoZonedDateTime,Tempo ZonedDateTime minusMinutes(long) ZonedDateTime minusSeconds(long) ZonedDateTime minusNanos(long) - ZonedDateTime now() - ZonedDateTime now(ZoneId) ZonedDateTime of(LocalDate,LocalTime,ZoneId) ZonedDateTime of(LocalDateTime,ZoneId) ZonedDateTime of(int,int,int,int,int,int,int,ZoneId) diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt index 3757e4fb76c..19af8204f28 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt @@ -101,6 +101,12 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints -> org.elastic double geohashDistanceWithDefault(String,double) } +class org.elasticsearch.index.fielddata.ScriptDocValues.Booleans -> org.elasticsearch.index.fielddata.ScriptDocValues$Booleans extends List,Collection,Iterable,Object { + Boolean get(int) + boolean getValue() + List getValues() +} + # for testing. 
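# (The ScriptDocValues.Booleans entry added above is what lets a Painless script read
# boolean doc values, e.g. the script query "doc['bool'].value == false" exercised in
# the 30_search.yaml test further down.)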
# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.FeatureTest extends Object { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java new file mode 100644 index 00000000000..69b40f141e2 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.common.Nullable; +import org.hamcrest.Matcher; + +import static java.util.Collections.singletonMap; + +/** + * Superclass for testing array-like objects (arrays and lists). + */ +public abstract class ArrayLikeObjectTestCase extends ScriptTestCase { + /** + * Build the string for declaring the variable holding the array-like-object to test. So {@code int[]} for arrays and {@code List} for + * lists. + */ + protected abstract String declType(String valueType); + /** + * Build the string for calling the constructor for the array-like-object to test. So {@code new int[5]} for arrays and + * {@code [0, 0, 0, 0, 0]} or {@code [null, null, null, null, null]} for lists. + */ + protected abstract String valueCtorCall(String valueType, int size); + /** + * Matcher for the message of the out of bounds exceptions thrown for offsets that are too negative or too positive. + */ + protected abstract Matcher outOfBoundsExceptionMessageMatcher(int index, int size); + + private void arrayLoadStoreTestCase(boolean declareAsDef, String valueType, Object val, @Nullable Number valPlusOne) { + String declType = declareAsDef ?
"def" : declType(valueType); + String valueCtorCall = valueCtorCall(valueType, 5); + String decl = declType + " x = " + valueCtorCall; + assertEquals(5, exec(decl + "; return x.length", true)); + assertEquals(val, exec(decl + "; x[ 0] = params.val; return x[ 0];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[ 0] = params.val; return x[-5];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[-5] = params.val; return x[-5];", singletonMap("val", val), true)); + + expectOutOfBounds( 6, decl + "; return x[ 6]", val); + expectOutOfBounds(-1, decl + "; return x[-6]", val); + expectOutOfBounds( 6, decl + "; x[ 6] = params.val; return 0", val); + expectOutOfBounds(-1, decl + "; x[-6] = params.val; return 0", val); + + if (valPlusOne != null) { + assertEquals(val, exec(decl + "; x[0] = params.val; x[ 0] = x[ 0]++; return x[0];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[0] = params.val; x[ 0] = x[-5]++; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0] = ++x[ 0]; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0] = ++x[-5]; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0]++ ; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[-5]++ ; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0] += 1 ; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[-5] += 1 ; return x[0];", singletonMap("val", val), true)); + + expectOutOfBounds( 6, decl + "; return x[ 6]++", val); + expectOutOfBounds(-1, decl + "; return x[-6]++", val); + expectOutOfBounds( 6, decl + "; return ++x[ 6]", val); + expectOutOfBounds(-1, decl + "; return ++x[-6]", val); + expectOutOfBounds( 6, decl + "; x[ 6] += 1; return 0", val); + expectOutOfBounds(-1, decl + "; x[-6] += 1; return 0", val); + } + } + + private void expectOutOfBounds(int index, String script, Object val) { + IndexOutOfBoundsException e = expectScriptThrows(IndexOutOfBoundsException.class, + () -> exec(script, singletonMap("val", val), true)); + try { + assertThat(e.getMessage(), outOfBoundsExceptionMessageMatcher(index, 5)); + } catch (AssertionError ae) { + ae.addSuppressed(e); // Mark the exception we are testing as suppressed so we get its stack trace. 
If it has one :( + throw ae; + } + } + + public void testInts() { arrayLoadStoreTestCase(false, "int", 5, 6); } + public void testIntsInDef() { arrayLoadStoreTestCase(true, "int", 5, 6); } + public void testLongs() { arrayLoadStoreTestCase(false, "long", 5L, 6L); } + public void testLongsInDef() { arrayLoadStoreTestCase(true, "long", 5L, 6L); } + public void testShorts() { arrayLoadStoreTestCase(false, "short", (short) 5, (short) 6); } + public void testShortsInDef() { arrayLoadStoreTestCase(true, "short", (short) 5, (short) 6); } + public void testBytes() { arrayLoadStoreTestCase(false, "byte", (byte) 5, (byte) 6); } + public void testBytesInDef() { arrayLoadStoreTestCase(true, "byte", (byte) 5, (byte) 6); } + public void testFloats() { arrayLoadStoreTestCase(false, "float", 5.0f, 6.0f); } + public void testFloatsInDef() { arrayLoadStoreTestCase(true, "float", 5.0f, 6.0f); } + public void testDoubles() { arrayLoadStoreTestCase(false, "double", 5.0d, 6.0d); } + public void testDoublesInDef() { arrayLoadStoreTestCase(true, "double", 5.0d, 6.0d); } + public void testStrings() { arrayLoadStoreTestCase(false, "String", "cat", null); } + public void testStringsInDef() { arrayLoadStoreTestCase(true, "String", "cat", null); } + public void testDef() { arrayLoadStoreTestCase(true, "def", 5, null); } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java index acacc613ab3..fe2ee1683bb 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java @@ -19,11 +19,29 @@ package org.elasticsearch.painless; +import org.hamcrest.Matcher; + import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodType; -/** Tests for or operator across all types */ -public class ArrayTests extends ScriptTestCase { +import static org.hamcrest.Matchers.equalTo; + +/** Tests for working with arrays. 
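+ * The bulk of the coverage (including negative indices such as {@code x[-5]} on a
+ * five-element array) is inherited from ArrayLikeObjectTestCase; this class only
+ * supplies the array-specific declaration, constructor call, and exception matcher.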
*/ +public class ArrayTests extends ArrayLikeObjectTestCase { + @Override + protected String declType(String valueType) { + return valueType + "[]"; + } + + @Override + protected String valueCtorCall(String valueType, int size) { + return "new " + valueType + "[" + size + "]"; + } + + @Override + protected Matcher outOfBoundsExceptionMessageMatcher(int index, int size) { + return equalTo(Integer.toString(index)); + } public void testArrayLengthHelper() throws Throwable { assertArrayLength(2, new int[2]); @@ -45,29 +63,6 @@ public class ArrayTests extends ScriptTestCase { .invokeExact(array)); } - public void testArrayLoadStoreInt() { - assertEquals(5, exec("def x = new int[5]; return x.length")); - assertEquals(5, exec("def x = new int[4]; x[0] = 5; return x[0];")); - } - - public void testArrayLoadStoreString() { - assertEquals(5, exec("def x = new String[5]; return x.length")); - assertEquals("foobar", exec("def x = new String[4]; x[0] = 'foobar'; return x[0];")); - } - - public void testArrayLoadStoreDef() { - assertEquals(5, exec("def x = new def[5]; return x.length")); - assertEquals(5, exec("def x = new def[4]; x[0] = 5; return x[0];")); - } - - public void testArrayCompoundInt() { - assertEquals(6, exec("int[] x = new int[5]; x[0] = 5; x[0]++; return x[0];")); - } - - public void testArrayCompoundDef() { - assertEquals(6, exec("def x = new int[5]; x[0] = 5; x[0]++; return x[0];")); - } - public void testJacksCrazyExpression1() { assertEquals(1, exec("int x; def[] y = new def[1]; x = y[0] = 1; return x;")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index cbfdd31b143..947d86f48c7 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -2,6 +2,8 @@ package org.elasticsearch.painless; import java.util.Collections; +import static java.util.Collections.singletonMap; + /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -135,4 +137,73 @@ public class BasicExpressionTests extends ScriptTestCase { assertEquals(2, exec("int x = 5; return (x+x)/x;")); assertEquals(true, exec("boolean t = true, f = false; return t && (f || t);")); } + + public void testNullSafeDeref() { + // Objects in general + assertNull( exec("String a = null; return a?.toString()")); // Call + assertNull( exec("String a = null; return a?.length()")); // Call and box + assertEquals("foo", exec("String a = 'foo'; return a?.toString()")); // Call + assertEquals(Integer.valueOf(3), exec("String a = 'foo'; return a?.length()")); // Call and box + + // Maps + assertNull( exec("Map a = null; return a?.toString()")); // Call + assertNull( exec("Map a = null; return a?.size()")); // Call and box + assertNull( exec("Map a = null; return a?.other")); // Read shortcut + assertEquals("{}", exec("Map a = [:]; return a?.toString()")); // Call + assertEquals(0, exec("Map a = [:]; return a?.size()")); // Call and box + assertEquals(1, exec("Map a = ['other':1]; return a?.other")); // Read shortcut + + // Array + // Since you can't invoke methods on arrays we skip the toString and hashCode tests + assertNull( exec("int[] a = null; return a?.length")); // Length (boxed) + assertEquals(2, exec("int[] a = new int[] {2, 3}; return a?.length")); // Length (boxed) + + // Def + assertNull( exec("def a = null; return a?.getX()")); // Invoke + assertNull( exec("def a = null; return a?.x")); // Read shortcut + assertEquals(0, exec("def a = new org.elasticsearch.painless.FeatureTest(); return a?.getX()")); // Invoke + assertEquals(0, exec("def a = new org.elasticsearch.painless.FeatureTest(); return a?.x")); // Read shortcut + + // Results from maps (should just work but let's test anyway) + FeatureTest t = new FeatureTest(); + assertNull( exec("Map a = ['thing': params.t]; return a.other?.getX()", singletonMap("t", t), true)); + assertNull( exec("Map a = ['thing': params.t]; return a.other?.x", singletonMap("t", t), true)); + assertNull( exec("def a = ['thing': params.t]; return a.other?.getX()", singletonMap("t", t), true)); + assertNull( exec("def a = ['thing': params.t]; return a.other?.x", singletonMap("t", t), true)); + assertEquals(0, exec("Map a = ['other': params.t]; return a.other?.getX()", singletonMap("t", t), true)); + assertEquals(0, exec("Map a = ['other': params.t]; return a.other?.x", singletonMap("t", t), true)); + assertEquals(0, exec("def a = ['other': params.t]; return a.other?.getX()", singletonMap("t", t), true)); + assertEquals(0, exec("def a = ['other': params.t]; return a.other?.x", singletonMap("t", t), true)); + + // Chains + assertNull( exec("Map a = ['thing': ['cat': params.t]]; return a.other?.cat?.getX()", singletonMap("t", t), true)); + assertNull( exec("Map a = ['thing': ['cat': params.t]]; return a.other?.cat?.x", singletonMap("t", t), true)); + assertNull( exec("def a = ['thing': ['cat': params.t]]; return a.other?.cat?.getX()", singletonMap("t", t), true)); + assertNull( exec("def a = ['thing': ['cat': params.t]]; return a.other?.cat?.x", singletonMap("t", t), true)); + assertEquals(0, exec("Map a = ['other': ['cat': params.t]]; return a.other?.cat?.getX()", singletonMap("t", t), true)); + assertEquals(0, exec("Map a = ['other': ['cat': params.t]]; return a.other?.cat?.x", singletonMap("t", t), true)); + assertEquals(0, exec("def a = ['other': ['cat': params.t]]; return a.other?.cat?.getX()", singletonMap("t", t), true)); + assertEquals(0, exec("def a = ['other': ['cat': params.t]]; return 
a.other?.cat?.x", singletonMap("t", t), true)); + + // Check that we don't try to cast when the LHS doesn't provide an "expected" value + assertNull(exec( + "def a = [:];\n" + + "a.missing_length = a.missing?.length();\n" + + "return a.missing_length", true)); + assertEquals(3, exec( + "def a = [:];\n" + + "a.missing = 'foo';\n" + + "a.missing_length = a.missing?.length();\n" + + "return a.missing_length", true)); + + // Writes, all unsupported at this point +// assertEquals(null, exec("org.elasticsearch.painless.FeatureTest a = null; return a?.x")); // Read field +// assertEquals(null, exec("org.elasticsearch.painless.FeatureTest a = null; a?.x = 7; return a?.x")); // Write field +// assertEquals(null, exec("Map a = null; a?.other = 'wow'; return a?.other")); // Write shortcut +// assertEquals(null, exec("def a = null; a?.other = 'cat'; return a?.other")); // Write shortcut +// assertEquals(null, exec("Map a = ['thing': 'bar']; a.other?.cat = 'no'; return a.other?.cat")); +// assertEquals(null, exec("def a = ['thing': 'bar']; a.other?.cat = 'no'; return a.other?.cat")); +// assertEquals(null, exec("Map a = ['thing': 'bar']; a.other?.cat?.dog = 'wombat'; return a.other?.cat?.dog")); +// assertEquals(null, exec("def a = ['thing': 'bar']; a.other?.cat?.dog = 'wombat'; return a.other?.cat?.dog")); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java index ca95dafd0b8..55d3f1c8101 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LangPainlessClientYamlTestSuiteIT.java @@ -37,7 +37,7 @@ public class LangPainlessClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ListTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ListTests.java new file mode 100644 index 00000000000..1ae7ca0bc4f --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ListTests.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.hamcrest.Matcher; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +/** Tests for working with lists. */ +public class ListTests extends ArrayLikeObjectTestCase { + @Override + protected String declType(String valueType) { + return "List"; + } + + @Override + protected String valueCtorCall(String valueType, int size) { + String[] fill = new String[size]; + Arrays.fill(fill, fillValue(valueType)); + return "[" + String.join(",", fill) + "]"; + } + + private String fillValue(String valueType) { + switch (valueType) { + case "int": return "0"; + case "long": return "0L"; + case "short": return "(short) 0"; + case "byte": return "(byte) 0"; + case "float": return "0.0f"; + case "double": return "0.0"; // Double is implicit for decimal constants + default: return null; + } + } + + @Override + protected Matcher outOfBoundsExceptionMessageMatcher(int index, int size) { + if ("1.8".equals(Runtime.class.getPackage().getSpecificationVersion())) { + if (index > size) { + return equalTo("Index: " + index + ", Size: " + size); + } + Matcher matcher = equalTo(Integer.toString(index)); + // If we set -XX:-OmitStackTraceInFastThrow we wouldn't need this + matcher = anyOf(matcher, nullValue()); + return matcher; + } else { + // This exception is locale dependent so we attempt to reproduce it + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(new Object()); + } + Exception e = expectThrows(IndexOutOfBoundsException.class, () -> list.get(index)); + return equalTo(e.getMessage()); + } + } + +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/MapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/MapTests.java new file mode 100644 index 00000000000..034213e74be --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/MapTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import static java.util.Collections.singletonMap; + +/** Tests for working with maps. 
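+ * Unlike arrays and lists, map keys are never index-flipped: {@code x[-5]} reads the
+ * entry stored under the key -5, not the fifth-from-last slot.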
*/ +public class MapTests extends ScriptTestCase { + private void mapAccessesTestCase(String listType) { + Object val = randomFrom("test", 1, 1.3, new Object()); + String decl = listType + " x = ['a': 1, 'b': 2, 0: 2, -5: 'slot', 123.1: 12]"; + assertEquals(5, exec(decl + "; return x.size()")); + assertEquals(2, exec(decl + "; return x[0];", true)); + assertEquals(1, exec(decl + "; return x['a'];", true)); + assertEquals(12, exec(decl + "; return x[123.1];", true)); + assertEquals(val, exec(decl + "; x[ 0] = params.val; return x[ 0];", singletonMap("val", val), true)); + assertEquals("slot", exec(decl + "; x[ 0] = params.val; return x[-5];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[-5] = params.val; return x[-5];", singletonMap("val", val), true)); + } + + public void testMapInDefAccesses() { + mapAccessesTestCase("def"); + } + + public void testMapAccesses() { + mapAccessesTestCase("Map"); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java index 3fe071c5221..2de25ba54b0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.painless; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESSingleNodeTestCase; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptEngineTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptEngineTests.java index 2cd21c0596f..89385c8862c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptEngineTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptEngineTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.painless; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import java.util.Arrays; import java.util.Collections; @@ -83,7 +83,7 @@ public class ScriptEngineTests extends ScriptTestCase { Object compiledScript = scriptEngine.compile(null, "return ctx.value;", Collections.emptyMap()); - ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptService.ScriptType.INLINE, + ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptType.INLINE, "testChangingVarsCrossExecution1", "painless", compiledScript), vars); ctx.put("value", 1); @@ -99,7 +99,7 @@ public class ScriptEngineTests extends ScriptTestCase { Map vars = new HashMap<>(); Object compiledScript = scriptEngine.compile(null, "return params['value'];", Collections.emptyMap()); - ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptService.ScriptType.INLINE, + ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptType.INLINE, "testChangingVarsCrossExecution2", "painless", compiledScript), vars); script.setNextVar("value", 1); diff --git 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 672204cbc25..10149674bea 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -26,7 +26,7 @@ import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptException; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -83,7 +83,7 @@ public abstract class ScriptTestCase extends ESTestCase { } // test actual script execution Object object = scriptEngine.compile(null, script, compileParams); - CompiledScript compiled = new CompiledScript(ScriptService.ScriptType.INLINE, getTestName(), "painless", object); + CompiledScript compiled = new CompiledScript(ScriptType.INLINE, getTestName(), "painless", object); ExecutableScript executableScript = scriptEngine.executable(compiled, vars); if (scorer != null) { ((ScorerAware)executableScript).setScorer(scorer); diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/15_update.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/15_update.yaml index 8e7e3d787e2..9da3761d1b1 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/15_update.yaml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/15_update.yaml @@ -58,4 +58,83 @@ - match: { _source.foo: yyy } - match: { _source.count: 1 } - + + - do: + update: + index: test_1 + type: test + id: 1 + body: + script: + lang: painless + inline: "ctx._source.missing_length = ctx._source.missing?.length()" + + - match: { _index: test_1 } + - match: { _type: test } + - match: { _id: "1" } + - match: { _version: 4 } + + - do: + get: + index: test_1 + type: test + id: 1 + + - match: { _source.foo: yyy } + - match: { _source.count: 1 } + - is_false: _source.missing + - is_false: _source.missing_length + + - do: + update: + index: test_1 + type: test + id: 1 + body: + script: + lang: painless + inline: "ctx._source.foo_length = ctx._source.foo?.length()" + + - match: { _index: test_1 } + - match: { _type: test } + - match: { _id: "1" } + - match: { _version: 5 } + + - do: + get: + index: test_1 + type: test + id: 1 + + - match: { _source.foo: yyy } + - match: { _source.foo_length: 3 } + - match: { _source.count: 1 } + - is_false: _source.missing + - is_false: _source.missing_length + +--- +"Update Script with script error": + - do: + index: + index: test_1 + type: test + id: 2 + body: + foo: bar + count: 1 + + - do: + catch: request + update: + index: test_1 + type: test + id: 2 + body: + script: + lang: painless + inline: "for (def key : params.keySet()) { ctx._source[key] = params[key]}" + params: { bar: 'xxx' } + + - match: { error.root_cause.0.type: "remote_transport_exception" } + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "Object has already been built and is self-referencing itself" } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/20_scriptfield.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/20_scriptfield.yaml index b92012959d1..902c6950245 100644 --- 
a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/20_scriptfield.yaml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/20_scriptfield.yaml @@ -10,6 +10,8 @@ setup: properties: foo: type: keyword + missing: + type: keyword - do: index: index: test @@ -20,17 +22,62 @@ setup: indices.refresh: {} --- - "Scripted Field": - do: search: body: script_fields: bar: - script: + script: inline: "doc['foo'].value + params.x;" - lang: painless params: x: "bbb" - match: { hits.hits.0.fields.bar.0: "aaabbb"} + +--- +"Scripted Field with a null safe dereference (non-null)": + - do: + search: + body: + script_fields: + bar: + script: + inline: "doc['foo'].value?.length() + params.x;" + params: + x: 5 + + - match: { hits.hits.0.fields.bar.0: 8} + +--- +"Scripted Field with a null safe dereference (null)": + # Change this to ?: once we have it implemented + - do: + search: + body: + script_fields: + bar: + script: + inline: "(doc['missing'].value?.length() == null ? 0 : doc['missing'].value?.length()) + params.x;" + params: + x: 5 + + - match: { hits.hits.0.fields.bar.0: 5} + +--- +"Scripted Field with script error": + - do: + catch: request + search: + body: + script_fields: + bar: + script: + inline: "while (true) {}" + + - match: { error.root_cause.0.type: "script_exception" } + - match: { error.root_cause.0.reason: "compile error" } + - match: { error.caused_by.type: "script_exception" } + - match: { error.caused_by.reason: "compile error" } + - match: { error.caused_by.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.caused_by.reason: "While loop has no escape." } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/25_script_upsert.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/25_script_upsert.yaml index 2adf0de747f..3be567f2acb 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/25_script_upsert.yaml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/25_script_upsert.yaml @@ -63,4 +63,24 @@ - match: { _source.foo: xxx } + - do: + update: + index: test_1 + type: test + id: 3 + body: + script: + inline: "ctx._source.has_now = ctx._now > 0" + lang: "painless" + upsert: { has_now: false } + scripted_upsert: true + + - do: + get: + index: test_1 + type: test + id: 3 + + - match: { _source.has_now: true } + diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml index f1051ba7106..d92c0e41e6c 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/30_search.yaml @@ -6,19 +6,19 @@ index: test type: test id: 1 - body: { "test": "value beck", "num1": 1.0 } + body: { "test": "value beck", "num1": 1.0, "bool": true } - do: index: index: test type: test id: 2 - body: { "test": "value beck", "num1": 2.0 } + body: { "test": "value beck", "num1": 2.0, "bool": false } - do: index: index: test type: test id: 3 - body: { "test": "value beck", "num1": 3.0 } + body: { "test": "value beck", "num1": 3.0, "bool": true } - do: indices.refresh: {} @@ -95,6 +95,19 @@ - match: { hits.hits.1.fields.sNum1.0: 2.0 } - match: { hits.hits.2.fields.sNum1.0: 3.0 } + - do: + index: test + search: + body: + query: + script: + script: + inline: "doc['bool'].value == false" + lang: painless + + - match: { hits.total: 1 } + - match: { 
hits.hits.0._id: "2" } + --- "Custom Script Boost": diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java index a18c12bb769..d8bb91f2fb0 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java @@ -89,7 +89,7 @@ public class MultiPercolateRequest extends ActionRequest /** * Embeds a percolate request which request body is defined as raw bytes to this multi percolate request */ - public MultiPercolateRequest add(BytesReference data, boolean allowExplicitIndex) throws Exception { + public MultiPercolateRequest add(BytesReference data, boolean allowExplicitIndex) throws IOException { XContent xContent = XContentFactory.xContent(data); int from = 0; int length = data.length(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 74ce3a5be1e..4236e9e0c0b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Weight; @@ -50,7 +51,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -366,6 +366,9 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder(restChannel)); + return channel -> client.execute(MultiPercolateAction.INSTANCE, multiPercolateRequest, new RestToXContentListener<>(channel)); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java index 35d9f2c604a..0c9eaa4d12d 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/RestPercolateAction.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.percolator; import org.elasticsearch.action.get.GetRequest; @@ -32,11 +33,14 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestToXContentListener; +import java.io.IOException; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @Deprecated public class RestPercolateAction extends BaseRestHandler { + @Inject public RestPercolateAction(Settings settings, RestController controller) { super(settings); @@ -56,7 +60,7 @@ public class RestPercolateAction extends BaseRestHandler { controller.registerHandler(POST, "/{index}/{type}/{id}/_percolate/count", countExistingDocHandler); } - void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel, NodeClient client) { + private RestChannelConsumer parseDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, NodeClient client) { percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); percolateRequest.documentType(restRequest.param("type")); percolateRequest.routing(restRequest.param("routing")); @@ -64,11 +68,10 @@ public class RestPercolateAction extends BaseRestHandler { percolateRequest.source(RestActions.getRestContent(restRequest)); percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions())); - executePercolate(client, percolateRequest, restChannel); + return channel -> executePercolate(client, percolateRequest, channel); } - void parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, RestChannel restChannel, - NodeClient client) { + private RestChannelConsumer parseExistingDocPercolate(PercolateRequest percolateRequest, RestRequest restRequest, NodeClient client) { String index = restRequest.param("index"); String type = restRequest.param("type"); percolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param("percolate_index", index))); @@ -89,57 +92,61 @@ public class RestPercolateAction extends BaseRestHandler { percolateRequest.source(RestActions.getRestContent(restRequest)); percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions())); - executePercolate(client, percolateRequest, restChannel); + return channel -> executePercolate(client, percolateRequest, channel); } - void executePercolate(final NodeClient client, final PercolateRequest percolateRequest, final RestChannel restChannel) { + private void executePercolate(final NodeClient client, final PercolateRequest percolateRequest, final RestChannel restChannel) { client.execute(PercolateAction.INSTANCE, percolateRequest, new RestToXContentListener<>(restChannel)); } @Override - public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { + public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { PercolateRequest percolateRequest = new PercolateRequest(); - parseDocPercolate(percolateRequest, restRequest, restChannel, client); + return parseDocPercolate(percolateRequest, restRequest, client); } - final class RestCountPercolateDocHandler extends BaseRestHandler { + private final class RestCountPercolateDocHandler extends BaseRestHandler { private RestCountPercolateDocHandler(Settings settings) { super(settings); } @Override - public void handleRequest(RestRequest restRequest, 
RestChannel restChannel, final NodeClient client) { + public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { PercolateRequest percolateRequest = new PercolateRequest(); percolateRequest.onlyCount(true); - parseDocPercolate(percolateRequest, restRequest, restChannel, client); + return parseDocPercolate(percolateRequest, restRequest, client); } + } - final class RestPercolateExistingDocHandler extends BaseRestHandler { + private final class RestPercolateExistingDocHandler extends BaseRestHandler { - protected RestPercolateExistingDocHandler(Settings settings) { + RestPercolateExistingDocHandler(Settings settings) { super(settings); } @Override - public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { + public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { PercolateRequest percolateRequest = new PercolateRequest(); - parseExistingDocPercolate(percolateRequest, restRequest, restChannel, client); + return parseExistingDocPercolate(percolateRequest, restRequest, client); } + } - final class RestCountPercolateExistingDocHandler extends BaseRestHandler { + private final class RestCountPercolateExistingDocHandler extends BaseRestHandler { - protected RestCountPercolateExistingDocHandler(Settings settings) { + RestCountPercolateExistingDocHandler(Settings settings) { super(settings); } @Override - public void handleRequest(RestRequest restRequest, RestChannel restChannel, final NodeClient client) { + public RestChannelConsumer prepareRequest(RestRequest restRequest, final NodeClient client) throws IOException { PercolateRequest percolateRequest = new PercolateRequest(); percolateRequest.onlyCount(true); - parseExistingDocPercolate(percolateRequest, restRequest, restChannel, client); + return parseExistingDocPercolate(percolateRequest, restRequest, client); } + } + } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 8ac9890afa8..1526823369f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -47,6 +47,7 @@ import org.apache.lucene.search.FilterScorer; import org.apache.lucene.search.FilteredDocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; @@ -61,7 +62,6 @@ import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index cac770f415d..97535b42093 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ 
b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -40,9 +40,9 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; @@ -142,7 +142,7 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index c08586533e2..fad3cc27d23 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -292,7 +292,8 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { .field(fieldName, queryBuilder) .endObject().bytes()); BytesRef qbSource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); - assertQueryBuilder(qbSource, queryBuilder.rewrite(indexService.newQueryShardContext())); + assertQueryBuilder(qbSource, queryBuilder.rewrite(indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }))); } @@ -476,7 +477,9 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException { XContentParser sourceParser = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent() .createParser(actual.bytes, actual.offset, actual.length); - QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser); + QueryParseContext qsc = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }) + .newParseContext(sourceParser); assertThat(qsc.parseInnerQueryBuilder().get(), equalTo(expected)); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java index 7d10b831bc8..197a82f2ccc 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java @@ -42,6 +42,9 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.functionscore.WeightBuilder; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -59,13 +62,13 @@ import java.util.Map; import java.util.NavigableSet; import java.util.Set; import java.util.TreeSet; +import java.util.function.Function; import static 
org.elasticsearch.percolator.PercolateSourceBuilder.docBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery; @@ -99,6 +102,7 @@ public class PercolatorIT extends ESIntegTestCase { return Collections.singleton(PercolatorPlugin.class); } + @Override protected Collection> transportClientPlugins() { return Collections.singleton(PercolatorPlugin.class); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index b21c131a625..665b9926a58 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.percolator; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,12 +32,20 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -49,7 +58,13 @@ import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery; import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.percolator.PercolateSourceBuilder.docBuilder; +import static org.elasticsearch.percolator.PercolatorTestUtil.assertMatchCount; +import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; +import static org.elasticsearch.percolator.PercolatorTestUtil.preparePercolate; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; 
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -58,7 +73,33 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(PercolatorPlugin.class); + return Arrays.asList(PercolatorPlugin.class, CustomScriptPlugin.class); + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + @Override + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = new HashMap<>(); + scripts.put("1==1", vars -> Boolean.TRUE); + return scripts; + } + } + + public void testPercolateScriptQuery() throws IOException { + client().admin().indices().prepareCreate("index").addMapping("type", "query", "type=percolator").get(); + ensureGreen(); + client().prepareIndex("index", "type", "1") + .setSource(jsonBuilder().startObject().field("query", QueryBuilders.scriptQuery( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1==1", Collections.emptyMap()))).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .execute().actionGet(); + PercolateResponse response = preparePercolate(client()) + .setIndices("index").setDocumentType("type") + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())) + .execute().actionGet(); + assertMatchCount(response, 1L); + assertThat(response.getMatches(), arrayWithSize(1)); + assertThat(convertFromTextArray(response.getMatches(), "index"), arrayContainingInAnyOrder("1")); } public void testPercolatorQuery() throws Exception { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 1b8b123aa13..4410ac8012b 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -38,7 +39,6 @@ import org.apache.lucene.search.spans.SpanNotQuery; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.percolator.QueryAnalyzer.Result; diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index fa4c44d9224..be8fb7defc0 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -26,13 +26,13 @@ esplugin { integTest { cluster { // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', 'myself' + setting 'reindex.remote.whitelist', '127.0.0.1:*' } } run { // Whitelist reindexing from the local node so we can test it. 
- setting 'reindex.remote.whitelist', 'myself' + setting 'reindex.remote.whitelist', '127.0.0.1:*' } dependencies { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 32824e969d9..d8e935b5022 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -65,7 +65,7 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort; */ public abstract class AbstractAsyncBulkByScrollAction> { protected final Logger logger; - protected final BulkByScrollTask task; + protected final WorkingBulkByScrollTask task; protected final ThreadPool threadPool; /** * The request for this action. Named mainRequest because we create lots of request variables all representing child @@ -81,7 +81,7 @@ public abstract class AbstractAsyncBulkByScrollAction listener) { this.task = task; this.logger = logger; @@ -256,22 +256,21 @@ public abstract class AbstractAsyncBulkByScrollAction, ScrollableHitSource.Hit, RequestWrapper> scriptApplier; - public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, + public AbstractAsyncBulkIndexByScrollAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, Request mainRequest, ActionListener listener, ScriptService scriptService, ClusterState clusterState) { @@ -154,9 +154,9 @@ public abstract class AbstractAsyncBulkIndexByScrollAction> { + interface RequestWrapper> { void setIndex(String index); @@ -422,7 +422,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, ScrollableHitSource.Hit, RequestWrapper> { - private final BulkByScrollTask task; + private final WorkingBulkByScrollTask task; private final ScriptService scriptService; private final Script script; private final Map params; @@ -430,7 +430,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction context; - public ScriptApplier(BulkByScrollTask task, ScriptService scriptService, Script script, + public ScriptApplier(WorkingBulkByScrollTask task, ScriptService scriptService, Script script, Map params) { this.task = task; this.scriptService = scriptService; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index faa2607ce20..451b1199055 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -26,15 +26,11 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.search.aggregations.AggregatorParsers; -import 
org.elasticsearch.search.suggest.Suggesters; import org.elasticsearch.tasks.LoggingTaskListener; import org.elasticsearch.tasks.Task; @@ -59,8 +55,8 @@ public abstract class AbstractBaseReindexRestHandler< this.action = action; } - protected void handleRequest(RestRequest request, RestChannel channel, NodeClient client, - boolean includeCreated, boolean includeUpdated) throws IOException { + protected RestChannelConsumer doPrepareRequest(RestRequest request, NodeClient client, + boolean includeCreated, boolean includeUpdated) throws IOException { // Build the internal request Request internal = setCommonOptions(request, buildRequest(request)); @@ -70,8 +66,7 @@ public abstract class AbstractBaseReindexRestHandler< params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(includeCreated)); params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(includeUpdated)); - client.executeLocally(action, internal, new BulkIndexByScrollResponseContentListener(channel, params)); - return; + return channel -> client.executeLocally(action, internal, new BulkIndexByScrollResponseContentListener(channel, params)); } else { internal.setShouldStoreResult(true); } @@ -83,10 +78,9 @@ public abstract class AbstractBaseReindexRestHandler< */ ActionRequestValidationException validationException = internal.validate(); if (validationException != null) { - channel.sendResponse(new BytesRestResponse(channel, validationException)); - return; + throw validationException; } - sendTask(channel, client.executeLocally(action, internal, LoggingTaskListener.instance())); + return sendTask(client.executeLocally(action, internal, LoggingTaskListener.instance())); } /** @@ -103,6 +97,7 @@ public abstract class AbstractBaseReindexRestHandler< request.setRefresh(restRequest.paramAsBoolean("refresh", request.isRefresh())); request.setTimeout(restRequest.paramAsTime("timeout", request.getTimeout())); + request.setSlices(restRequest.paramAsInt("slices", request.getSlices())); String waitForActiveShards = restRequest.param("wait_for_active_shards"); if (waitForActiveShards != null) { @@ -116,13 +111,15 @@ public abstract class AbstractBaseReindexRestHandler< return request; } - private void sendTask(RestChannel channel, Task task) throws IOException { - try (XContentBuilder builder = channel.newBuilder()) { - builder.startObject(); - builder.field("task", clusterService.localNode().getId() + ":" + task.getId()); - builder.endObject(); - channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); - } + private RestChannelConsumer sendTask(Task task) throws IOException { + return channel -> { + try (XContentBuilder builder = channel.newBuilder()) { + builder.startObject(); + builder.field("task", clusterService.localNode().getId() + ":" + task.getId()); + builder.endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + } + }; } /** diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 1329aeb6d8c..d3463cdb61f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -98,16 +98,33 @@ public abstract class AbstractBulkByScrollRequest 1) { + return new ParentBulkByScrollTask(id, type, action, getDescription(), parentTaskId, slices); + } + /* Extract the slice from the 
search request so it'll be available in the status. This is potentially useful for users that manually + * slice their search requests so they can keep track of it and **absolutely** useful for automatically sliced reindex requests so + * they can properly track the responses. */ + Integer sliceId = searchRequest.source().slice() == null ? null : searchRequest.source().slice().getId(); + return new WorkingBulkByScrollTask(id, type, action, getDescription(), parentTaskId, sliceId, requestsPerSecond); } @Override @@ -331,6 +401,11 @@ public abstract class AbstractBulkByScrollRequest 1) { + throw new IllegalArgumentException("Attempting to send sliced reindex-style request to a node that doesn't support " + + "it. Version is [" + out.getVersion() + "] but must be [" + BulkByScrollTask.V_5_1_0_UNRELEASED + "]"); + } + } } /** diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java index 4d9c779f487..185c271e70f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java @@ -141,4 +141,12 @@ public abstract class AbstractBulkByScrollRequestBuilder< request.setShouldStoreResult(shouldStoreResult); return self(); } + + /** + * The number of slices this task should be divided into. Defaults to 1 meaning the task isn't sliced into subtasks. + */ + public Self setSlices(int workers) { + request.setSlices(workers); + return self(); + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java index 10fb0bc676e..62c2635b301 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.script.Script; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -34,11 +35,21 @@ public abstract class AbstractBulkIndexByScrollRequest delayedPrepareBulkRequestReference = new AtomicReference<>(); - - public BulkByScrollTask(long id, String type, String action, String description, TaskId parentTask, float requestsPerSecond) { - super(id, type, action, description, parentTask); - setRequestsPerSecond(requestsPerSecond); - } - - @Override - protected void onCancelled() { - // Drop the throttle to 0, immediately rescheduling all outstanding tasks so the task will wake up and cancel itself. 
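A minimal caller-side sketch of the new slicing surface above (the setSlices(...) builder method plus the slices-aware createTask(...)). The builder names come from this diff; the client wiring and index names are assumed for illustration and are not part of the change:

    // Hypothetical usage: a reindex sliced into four parallel workers. With slices > 1,
    // createTask(...) returns a ParentBulkByScrollTask that fans out one
    // WorkingBulkByScrollTask per slice and merges their responses when all report back.
    BulkIndexByScrollResponse response = ReindexAction.INSTANCE.newRequestBuilder(client)
            .source("source_index")
            .destination("dest_index")
            .setSlices(4)
            .get();
    // Counters in the merged response are summed across slices, e.g. response.getCreated().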
- rethrottle(0); - } - - @Override - public Status getStatus() { - return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(), - bulkRetries.get(), searchRetries.get(), timeValueNanos(throttledNanos.get()), getRequestsPerSecond(), getReasonCancelled(), - throttledUntil()); - } - - private TimeValue throttledUntil() { - DelayedPrepareBulkRequest delayed = delayedPrepareBulkRequestReference.get(); - if (delayed == null) { - return timeValueNanos(0); - } - if (delayed.future == null) { - return timeValueNanos(0); - } - return timeValueNanos(max(0, delayed.future.getDelay(TimeUnit.NANOSECONDS))); + public BulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId) { + super(id, type, action, description, parentTaskId); } /** - * Total number of successfully processed documents. + * The number of sub-slices that are still running. {@link WorkingBulkByScrollTask} will always have 0 and + * {@link ParentBulkByScrollTask} will return the number of waiting tasks. Used by {@link TransportRethrottleAction} to decide how to + * perform the rethrottling. */ - public long getSuccessfullyProcessed() { - return updated.get() + created.get() + deleted.get(); - } + abstract int runningSliceSubTasks(); - public static class Status implements Task.Status { + /** + * Apply the {@code newRequestsPerSecond}. + */ + abstract void rethrottle(float newRequestsPerSecond); + + /* + * Overridden to force children to return compatible status. + */ + public abstract BulkByScrollTask.Status getStatus(); + + /** + * Build the status for this task given a snapshot of the information of running slices. + */ + public abstract TaskInfo getInfoGivenSliceInfo(String localNodeId, List sliceInfo); + + public static class Status implements Task.Status, SuccessfullyProcessed { public static final String NAME = "bulk-by-scroll"; /** @@ -126,6 +88,7 @@ public class BulkByScrollTask extends CancellableTask { */ public static final String INCLUDE_UPDATED = "include_updated"; + private final Integer sliceId; private final long total; private final long updated; private final long created; @@ -139,10 +102,12 @@ public class BulkByScrollTask extends CancellableTask { private final float requestsPerSecond; private final String reasonCancelled; private final TimeValue throttledUntil; + private final List sliceStatuses; - public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, + public Status(Integer sliceId, long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long bulkRetries, long searchRetries, TimeValue throttled, float requestsPerSecond, @Nullable String reasonCancelled, TimeValue throttledUntil) { + this.sliceId = sliceId == null ? null : checkPositive(sliceId, "sliceId"); this.total = checkPositive(total, "total"); this.updated = checkPositive(updated, "updated"); this.created = checkPositive(created, "created"); @@ -156,9 +121,77 @@ public class BulkByScrollTask extends CancellableTask { this.requestsPerSecond = requestsPerSecond; this.reasonCancelled = reasonCancelled; this.throttledUntil = throttledUntil; + this.sliceStatuses = emptyList(); + } + + /** + * Constructor merging many statuses. + * + * @param sliceStatuses Statuses of sub requests that this task was sliced into. + * @param reasonCancelled Reason that *this* task was cancelled.
Note that each entry in {@code sliceStatuses} can be cancelled + * independently of this task but if this task is cancelled then the workers *should* be cancelled. + */ + public Status(List sliceStatuses, @Nullable String reasonCancelled) { + sliceId = null; + this.reasonCancelled = reasonCancelled; + + long mergedTotal = 0; + long mergedUpdated = 0; + long mergedCreated = 0; + long mergedDeleted = 0; + int mergedBatches = 0; + long mergedVersionConflicts = 0; + long mergedNoops = 0; + long mergedBulkRetries = 0; + long mergedSearchRetries = 0; + long mergedThrottled = 0; + float mergedRequestsPerSecond = 0; + long mergedThrottledUntil = Long.MAX_VALUE; + + for (StatusOrException slice : sliceStatuses) { + if (slice == null) { + // Hasn't returned yet. + continue; + } + if (slice.status == null) { + // This slice failed catastrophically so it doesn't count towards the status + continue; + } + mergedTotal += slice.status.getTotal(); + mergedUpdated += slice.status.getUpdated(); + mergedCreated += slice.status.getCreated(); + mergedDeleted += slice.status.getDeleted(); + mergedBatches += slice.status.getBatches(); + mergedVersionConflicts += slice.status.getVersionConflicts(); + mergedNoops += slice.status.getNoops(); + mergedBulkRetries += slice.status.getBulkRetries(); + mergedSearchRetries += slice.status.getSearchRetries(); + mergedThrottled += slice.status.getThrottled().nanos(); + mergedRequestsPerSecond += slice.status.getRequestsPerSecond(); + mergedThrottledUntil = min(mergedThrottledUntil, slice.status.getThrottledUntil().nanos()); + } + + total = mergedTotal; + updated = mergedUpdated; + created = mergedCreated; + deleted = mergedDeleted; + batches = mergedBatches; + versionConflicts = mergedVersionConflicts; + noops = mergedNoops; + bulkRetries = mergedBulkRetries; + searchRetries = mergedSearchRetries; + throttled = timeValueNanos(mergedThrottled); + requestsPerSecond = mergedRequestsPerSecond; + throttledUntil = timeValueNanos(mergedThrottledUntil == Long.MAX_VALUE ? 
0 : mergedThrottledUntil); + this.sliceStatuses = sliceStatuses; } public Status(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) { + sliceId = in.readOptionalVInt(); + } else { + sliceId = null; + } total = in.readVLong(); updated = in.readVLong(); created = in.readVLong(); @@ -172,10 +205,18 @@ public class BulkByScrollTask extends CancellableTask { requestsPerSecond = in.readFloat(); reasonCancelled = in.readOptionalString(); throttledUntil = new TimeValue(in); + if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) { + sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); + } else { + sliceStatuses = emptyList(); + } } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) { + out.writeOptionalVInt(sliceId); + } out.writeVLong(total); out.writeVLong(updated); out.writeVLong(created); @@ -189,6 +230,17 @@ public class BulkByScrollTask extends CancellableTask { out.writeFloat(requestsPerSecond); out.writeOptionalString(reasonCancelled); throttledUntil.writeTo(out); + if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) { + out.writeVInt(sliceStatuses.size()); + for (StatusOrException sliceStatus : sliceStatuses) { + out.writeOptionalWriteable(sliceStatus); + } + } + } + + @Override + public String getWriteableName() { + return NAME; } @Override @@ -200,6 +252,9 @@ public class BulkByScrollTask extends CancellableTask { public XContentBuilder innerXContent(XContentBuilder builder, Params params) throws IOException { + if (sliceId != null) { + builder.field("slice_id", sliceId); + } builder.field("total", total); if (params.paramAsBoolean(INCLUDE_UPDATED, true)) { builder.field("updated", updated); @@ -222,6 +277,17 @@ public class BulkByScrollTask extends CancellableTask { builder.field("canceled", reasonCancelled); } builder.timeValueField("throttled_until_millis", "throttled_until", throttledUntil); + if (false == sliceStatuses.isEmpty()) { + builder.startArray("slices"); + for (StatusOrException slice : sliceStatuses) { + if (slice == null) { + builder.nullValue(); + } else { + slice.toXContent(builder, params); + } + } + builder.endArray(); + } return builder; } @@ -234,7 +300,8 @@ public class BulkByScrollTask extends CancellableTask { } public void innerToString(StringBuilder builder) { - builder.append("updated=").append(updated); + builder.append("sliceId=").append(sliceId); + builder.append(",updated=").append(updated); builder.append(",created=").append(created); builder.append(",deleted=").append(deleted); builder.append(",batches=").append(batches); @@ -245,11 +312,16 @@ public class BulkByScrollTask extends CancellableTask { builder.append(",canceled=").append(reasonCancelled); } builder.append(",throttledUntil=").append(throttledUntil); + if (false == sliceStatuses.isEmpty()) { + builder.append(",workers=").append(sliceStatuses); + } } - @Override - public String getWriteableName() { - return NAME; + /** + * The id of the slice that this status is reporting or {@code null} if this isn't the status of a sub-slice. + */ + Integer getSliceId() { + return sliceId; } /** @@ -260,23 +332,17 @@ public class BulkByScrollTask extends CancellableTask { return total; } - /** - * Count of documents updated. - */ + @Override public long getUpdated() { return updated; } - /** - * Count of documents created. - */ + @Override public long getCreated() { return created; } - /** - * Count of successful delete operations. 
- */ + @Override public long getDeleted() { return deleted; } @@ -344,6 +410,13 @@ public class BulkByScrollTask extends CancellableTask { return throttledUntil; } + /** + * Statuses of the sub requests into which this sub-request was sliced. Empty if this request wasn't sliced into sub-requests. + */ + public List getSliceStatuses() { + return sliceStatuses; + } + private int checkPositive(int value, String name) { if (value < 0) { throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]"); @@ -359,195 +432,84 @@ public class BulkByScrollTask extends CancellableTask { } } - void setTotal(long totalHits) { - total.set(totalHits); - } - - void countBatch() { - batch.incrementAndGet(); - } - - void countNoop() { - noops.incrementAndGet(); - } - - void countCreated() { - created.incrementAndGet(); - } - - void countUpdated() { - updated.incrementAndGet(); - } - - void countDeleted() { - deleted.incrementAndGet(); - } - - void countVersionConflict() { - versionConflicts.incrementAndGet(); - } - - void countBulkRetry() { - bulkRetries.incrementAndGet(); - } - - void countSearchRetry() { - searchRetries.incrementAndGet(); - } - - float getRequestsPerSecond() { - return requestsPerSecond; - } - /** - * Schedule prepareBulkRequestRunnable to run after some delay. This is where throttling plugs into reindexing so the request can be - * rescheduled over and over again. + * The status of a slice of the request. Successful requests store the {@link StatusOrException#status} while failing requests store a + * {@link StatusOrException#exception}. */ - void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue lastBatchStartTime, int lastBatchSize, - AbstractRunnable prepareBulkRequestRunnable) { - // Synchronize so we are less likely to schedule the same request twice. - synchronized (delayedPrepareBulkRequestReference) { - TimeValue delay = throttleWaitTime(lastBatchStartTime, lastBatchSize); - delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), - delay, new RunOnce(prepareBulkRequestRunnable))); - } - } + public static class StatusOrException implements Writeable, ToXContent { + private final Status status; + private final Exception exception; - TimeValue throttleWaitTime(TimeValue lastBatchStartTime, int lastBatchSize) { - long earliestNextBatchStartTime = lastBatchStartTime.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); - return timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())); - } - - /** - * How many nanoseconds should a batch of lastBatchSize have taken if it were perfectly throttled? Package private for testing. 
- */ - float perfectlyThrottledBatchTime(int lastBatchSize) { - if (requestsPerSecond == Float.POSITIVE_INFINITY) { - return 0; - } - // requests - // ------------------- == seconds - // request per seconds - float targetBatchTimeInSeconds = lastBatchSize / requestsPerSecond; - // nanoseconds per seconds * seconds == nanoseconds - return TimeUnit.SECONDS.toNanos(1) * targetBatchTimeInSeconds; - } - - private void setRequestsPerSecond(float requestsPerSecond) { - this.requestsPerSecond = requestsPerSecond; - } - - void rethrottle(float newRequestsPerSecond) { - synchronized (delayedPrepareBulkRequestReference) { - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Rethrottling to [{}] requests per second", getId(), newRequestsPerSecond); - } - setRequestsPerSecond(newRequestsPerSecond); - - DelayedPrepareBulkRequest delayedPrepareBulkRequest = this.delayedPrepareBulkRequestReference.get(); - if (delayedPrepareBulkRequest == null) { - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Skipping rescheduling because there is no scheduled task", getId()); - } - // No request has been queued yet so nothing to reschedule. - return; - } - - this.delayedPrepareBulkRequestReference.set(delayedPrepareBulkRequest.rethrottle(newRequestsPerSecond)); - } - } - - class DelayedPrepareBulkRequest { - private final ThreadPool threadPool; - private final AbstractRunnable command; - private final float requestsPerSecond; - private final ScheduledFuture future; - - DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) { - this.threadPool = threadPool; - this.requestsPerSecond = requestsPerSecond; - this.command = command; - this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - throttledNanos.addAndGet(delay.nanos()); - command.run(); - } - - @Override - public void onFailure(Exception e) { - command.onFailure(e); - } - }); + public StatusOrException(Status status) { + this.status = status; + exception = null; } - DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { - if (newRequestsPerSecond != 0 && newRequestsPerSecond < requestsPerSecond) { - /* - * The user is attempting to slow the request down. We'll let the change in throttle take effect the next time we delay - * prepareBulkRequest. We can't just reschedule the request further out in the future the bulk context might time out. - */ - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Skipping rescheduling because the new throttle [{}] is slower than the old one [{}].", getId(), - newRequestsPerSecond, requestsPerSecond); - } - return this; - } - - long remainingDelay = future.getDelay(TimeUnit.NANOSECONDS); - // Actually reschedule the task - if (false == FutureUtils.cancel(future)) { - // Couldn't cancel, probably because the task has finished or been scheduled. Either way we have nothing to do here. - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Skipping rescheduling we couldn't cancel the task.", getId()); - } - return this; - } - - /* - * Strangely enough getting here doesn't mean that you actually cancelled the request, just that you probably did. If you stress - * test it you'll find that requests sneak through. So each request is given a runOnce boolean to prevent that. 
- */ - TimeValue newDelay = newDelay(remainingDelay, newRequestsPerSecond); - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Rescheduling for [{}] in the future.", getId(), newDelay); - } - return new DelayedPrepareBulkRequest(threadPool, requestsPerSecond, newDelay, command); + public StatusOrException(Exception exception) { + status = null; + this.exception = exception; } /** - * Scale back remaining delay to fit the new delay. + * Read from a stream. */ - TimeValue newDelay(long remainingDelay, float newRequestsPerSecond) { - if (remainingDelay < 0 || newRequestsPerSecond == 0) { - return timeValueNanos(0); - } - return timeValueNanos(round(remainingDelay * requestsPerSecond / newRequestsPerSecond)); - } - } - - /** - * Runnable that can only be run one time. This is paranoia to prevent furiously rethrottling from running the command multiple times. - * Without it the command would be run multiple times. - */ - private static class RunOnce extends AbstractRunnable { - private final AtomicBoolean hasRun = new AtomicBoolean(false); - private final AbstractRunnable delegate; - - public RunOnce(AbstractRunnable delegate) { - this.delegate = delegate; - } - - @Override - protected void doRun() throws Exception { - if (hasRun.compareAndSet(false, true)) { - delegate.run(); + public StatusOrException(StreamInput in) throws IOException { + if (in.readBoolean()) { + status = new Status(in); + exception = null; + } else { + status = null; + exception = in.readException(); } } @Override - public void onFailure(Exception e) { - delegate.onFailure(e); + public void writeTo(StreamOutput out) throws IOException { + if (exception == null) { + out.writeBoolean(true); + status.writeTo(out); + } else { + out.writeBoolean(false); + out.writeException(exception); + } + } + + public Status getStatus() { + return status; + } + + public Exception getException() { + return exception; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (exception == null) { + status.toXContent(builder, params); + } else { + builder.startObject(); + ElasticsearchException.toXContent(builder, params, exception); + builder.endObject(); + } + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != BulkByScrollTask.StatusOrException.class) { + return false; + } + BulkByScrollTask.StatusOrException other = (StatusOrException) obj; + return Objects.equals(status, other.status) + && Objects.equals(exception, other.exception); + } + + @Override + public int hashCode() { + return Objects.hash(status, exception); } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java index d717c1ad4de..1574a167108 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -29,10 +30,13 @@ import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import java.io.IOException; +import java.util.ArrayList; import java.util.List; +import static java.lang.Math.max; import static java.lang.Math.min; import static java.util.Objects.requireNonNull; +import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; /** * Response used for actions that index many documents using a scroll request. @@ -56,6 +60,22 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont this.timedOut = timedOut; } + public BulkIndexByScrollResponse(Iterable toMerge, @Nullable String reasonCancelled) { + long mergedTook = 0; + List statuses = new ArrayList<>(); + bulkFailures = new ArrayList<>(); + searchFailures = new ArrayList<>(); + for (BulkIndexByScrollResponse response : toMerge) { + mergedTook = max(mergedTook, response.getTook().nanos()); + statuses.add(new BulkByScrollTask.StatusOrException(response.status)); + bulkFailures.addAll(response.getBulkFailures()); + searchFailures.addAll(response.getSearchFailures()); + timedOut |= response.isTimedOut(); + } + took = timeValueNanos(mergedTook); + status = new BulkByScrollTask.Status(statuses, reasonCancelled); + } + public TimeValue getTook() { return took; } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 4d5f7623400..7f7ae52b73b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -197,9 +197,9 @@ public class ClientScrollableHitSource extends ScrollableHitSource { private final SearchHit delegate; private final BytesReference source; - public ClientHit(SearchHit delegate) { + ClientHit(SearchHit delegate) { this.delegate = delegate; - source = delegate.hasSource() ? null : delegate.getSourceRef(); + source = delegate.hasSource() ? delegate.getSourceRef() : null; } @Override diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index df1f4d387ab..cde5fea926c 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.tasks.TaskId; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -49,9 +50,15 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest> results; + private final AtomicInteger counter; + + public ParentBulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId, int slices) { + super(id, type, action, description, parentTaskId); + this.results = new AtomicArray<>(slices); + this.counter = new AtomicInteger(slices); + } + + @Override + void rethrottle(float newRequestsPerSecond) { + // Nothing to do because all rethrottling is done on slice sub tasks. 
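+ // Editorial illustration, not part of the original change: the results/counter pair
+ // declared above drives the slice bookkeeping defined below. With slices = 3 the task
+ // starts with counter = 3 and results = [null, null, null]. onSliceResponse(listener, 1, r1)
+ // stores (r1, null) and drops the counter to 2; onSliceFailure(listener, 0, e0) stores
+ // (null, e0) and drops it to 1; the final onSliceResponse(listener, 2, r2) reaches 0, and
+ // because slice 0 failed the listener gets onFailure(e0) (additional failures would be
+ // added as suppressed). Had every slice succeeded, the responses would instead be merged
+ // into a single BulkIndexByScrollResponse.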
+ } + + @Override + public Status getStatus() { + // We only have access to the statuses of requests that have finished so we return them + List statuses = Arrays.asList(new StatusOrException[results.length()]); + addResultsToList(statuses); + return new Status(unmodifiableList(statuses), getReasonCancelled()); + } + + @Override + int runningSliceSubTasks() { + return counter.get(); + } + + @Override + public TaskInfo getInfoGivenSliceInfo(String localNodeId, List sliceInfo) { + /* Merge the list of finished sub requests with the provided info. If a slice is both finished and in the list then we prefer the + * finished status because we don't expect them to change after the task is finished. */ + List sliceStatuses = Arrays.asList(new StatusOrException[results.length()]); + for (TaskInfo t : sliceInfo) { + Status status = (Status) t.getStatus(); + sliceStatuses.set(status.getSliceId(), new StatusOrException(status)); + } + addResultsToList(sliceStatuses); + Status status = new Status(sliceStatuses, getReasonCancelled()); + return taskInfo(localNodeId, getDescription(), status); + } + + private void addResultsToList(List sliceStatuses) { + for (AtomicArray.Entry> t : results.asList()) { + if (t.value != null) { + if (t.value.v1() != null) { + sliceStatuses.set(t.index, new StatusOrException(t.value.v1().getStatus())); + } else { + sliceStatuses.set(t.index, new StatusOrException(t.value.v2())); + } + } + } + } + + /** + * Record a response from a slice and respond to the listener if the request is finished. + */ + void onSliceResponse(ActionListener listener, int sliceId, BulkIndexByScrollResponse response) { + results.setOnce(sliceId, new Tuple<>(response, null)); + /* If the request isn't finished we could automatically rethrottle the sub-requests here but we would only want to do that if we + * were fairly sure they had a while left to go. */ + recordSliceCompletionAndRespondIfAllDone(listener); + } + + /** + * Record a failure from a slice and respond to the listener if the request is finished. + */ + void onSliceFailure(ActionListener listener, int sliceId, Exception e) { + results.setOnce(sliceId, new Tuple<>(null, e)); + recordSliceCompletionAndRespondIfAllDone(listener); + // TODO cancel when a slice fails? + } + + private void recordSliceCompletionAndRespondIfAllDone(ActionListener listener) { + if (counter.decrementAndGet() != 0) { + return; + } + List responses = new ArrayList<>(results.length()); + Exception exception = null; + for (AtomicArray.Entry> t : results.asList()) { + if (t.value.v1() == null) { + assert t.value.v2() != null : "exception shouldn't be null if value is null"; + if (exception == null) { + exception = t.value.v2(); + } else { + exception.addSuppressed(t.value.v2()); + } + } else { + assert t.value.v2() == null : "exception should be null if response is not null"; + responses.add(t.value.v1()); + } + } + if (exception == null) { + listener.onResponse(new BulkIndexByScrollResponse(responses, getReasonCancelled())); + } else { + listener.onFailure(exception); + } + } + +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexParallelizationHelper.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexParallelizationHelper.java new file mode 100644 index 00000000000..b2dbd51f381 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexParallelizationHelper.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.slice.SliceBuilder; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; + +/** + * Helps parallelize reindex requests using sliced scrolls. + */ +public class ReindexParallelizationHelper { + private ReindexParallelizationHelper() {} + + public static < + Request extends AbstractBulkByScrollRequest + > void startSlices(Client client, TaskManager taskManager, Action action, + String localNodeId, ParentBulkByScrollTask task, Request request, ActionListener listener) { + TaskId parentTaskId = new TaskId(localNodeId, task.getId()); + for (final SearchRequest slice : sliceIntoSubRequests(request.getSearchRequest(), UidFieldMapper.NAME, request.getSlices())) { + // TODO move the request to the correct node. maybe here or somehow do it as part of startup for reindex in general.... + Request requestForSlice = request.forSlice(parentTaskId, slice); + ActionListener sliceListener = ActionListener.wrap( + r -> task.onSliceResponse(listener, slice.source().slice().getId(), r), + e -> task.onSliceFailure(listener, slice.source().slice().getId(), e)); + client.execute(action, requestForSlice, sliceListener); + /* Explicitly tell the task manager that we're running child tasks on the local node so it will cancel them when the parent is + * cancelled. */ + taskManager.registerChildTask(task, localNodeId); + } + } + + /** + * Slice a search request into {@code times} separate search requests slicing on {@code field}. Note that the slices are *shallow* + * copies of this request so don't change them. 
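+ * Editorial illustration, not part of the original change: with field = "_uid" and times = 3 the copies differ only in
+ * their slice, conceptually new SliceBuilder("_uid", 0, 3), new SliceBuilder("_uid", 1, 3) and new SliceBuilder("_uid", 2, 3),
+ * so each sub-request scrolls a disjoint third of the matching documents.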
+ */ + static SearchRequest[] sliceIntoSubRequests(SearchRequest request, String field, int times) { + SearchRequest[] slices = new SearchRequest[times]; + for (int slice = 0; slice < times; slice++) { + SliceBuilder sliceBuilder = new SliceBuilder(field, slice, times); + SearchSourceBuilder slicedSource; + if (request.source() == null) { + slicedSource = new SearchSourceBuilder().slice(sliceBuilder); + } else { + if (request.source().slice() != null) { + throw new IllegalStateException("Can't slice a request that already has a slice configuration"); + } + slicedSource = request.source().copyWithNewSlice(sliceBuilder); + } + slices[slice] = new SearchRequest() + .source(slicedSource) + .searchType(request.searchType()) + .indices(request.indices()) + .types(request.types()) + .routing(request.routing()) + .preference(request.preference()) + .requestCache(request.requestCache()) + .scroll(request.scroll()) + .indicesOptions(request.indicesOptions()); + } + return slices; + } + +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 8c11cd3430f..40aa745d06a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.index.reindex.remote.RemoteInfo; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.Arrays; @@ -56,7 +57,11 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest 1 but was [" + getSlices() + "]", e); + } } return e; } @@ -125,6 +135,13 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest + client.execute(RethrottleAction.INSTANCE, internalRequest, listTasksResponseListener(clusterService, groupBy, channel)); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java index 06adb76df78..8201f05f9ec 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java @@ -26,24 +26,21 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.search.aggregations.AggregatorParsers; -import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.function.Consumer; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.script.Script.ScriptField; +import static 
org.elasticsearch.script.Script.DEFAULT_SCRIPT_LANG; public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler { @@ -56,8 +53,8 @@ public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler config, ParseFieldMatcher parseFieldMatcher) { String script = null; - ScriptService.ScriptType type = null; - String lang = null; - Map params = null; + ScriptType type = null; + String lang = DEFAULT_SCRIPT_LANG; + Map params = Collections.emptyMap(); for (Iterator> itr = config.entrySet().iterator(); itr.hasNext();) { Map.Entry entry = itr.next(); String parameterName = entry.getKey(); Object parameterValue = entry.getValue(); - if (parseFieldMatcher.match(parameterName, ScriptField.LANG)) { + if (parseFieldMatcher.match(parameterName, Script.LANG_PARSE_FIELD)) { if (parameterValue instanceof String || parameterValue == null) { lang = (String) parameterValue; } else { throw new ElasticsearchParseException("Value must be of type String: [" + parameterName + "]"); } - } else if (parseFieldMatcher.match(parameterName, ScriptField.PARAMS)) { + } else if (parseFieldMatcher.match(parameterName, Script.PARAMS_PARSE_FIELD)) { if (parameterValue instanceof Map || parameterValue == null) { params = (Map) parameterValue; } else { throw new ElasticsearchParseException("Value must be of type String: [" + parameterName + "]"); } - } else if (parseFieldMatcher.match(parameterName, ScriptService.ScriptType.INLINE.getParseField())) { + } else if (parseFieldMatcher.match(parameterName, ScriptType.INLINE.getParseField())) { if (parameterValue instanceof String || parameterValue == null) { script = (String) parameterValue; - type = ScriptService.ScriptType.INLINE; + type = ScriptType.INLINE; } else { throw new ElasticsearchParseException("Value must be of type String: [" + parameterName + "]"); } - } else if (parseFieldMatcher.match(parameterName, ScriptService.ScriptType.FILE.getParseField())) { + } else if (parseFieldMatcher.match(parameterName, ScriptType.FILE.getParseField())) { if (parameterValue instanceof String || parameterValue == null) { script = (String) parameterValue; - type = ScriptService.ScriptType.FILE; + type = ScriptType.FILE; } else { throw new ElasticsearchParseException("Value must be of type String: [" + parameterName + "]"); } - } else if (parseFieldMatcher.match(parameterName, ScriptService.ScriptType.STORED.getParseField())) { + } else if (parseFieldMatcher.match(parameterName, ScriptType.STORED.getParseField())) { if (parameterValue instanceof String || parameterValue == null) { script = (String) parameterValue; - type = ScriptService.ScriptType.STORED; + type = ScriptType.STORED; } else { throw new ElasticsearchParseException("Value must be of type String: [" + parameterName + "]"); } @@ -127,10 +124,11 @@ public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler listener) { - ClusterState state = clusterService.state(); - ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); - new AsyncDeleteBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start(); + if (request.getSlices() > 1) { + ReindexParallelizationHelper.startSlices(client, taskManager, DeleteByQueryAction.INSTANCE, clusterService.localNode().getId(), + (ParentBulkByScrollTask) task, request, listener); + } else { + ClusterState state = clusterService.state(); + ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), 
task); + new AsyncDeleteBySearchAction((WorkingBulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, + state).start(); + } } @Override @@ -68,9 +74,9 @@ public class TransportDeleteByQueryAction extends HandledTransportAction { - public AsyncDeleteBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, - DeleteByQueryRequest request, ActionListener listener, - ScriptService scriptService, ClusterState clusterState) { + public AsyncDeleteBySearchAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, DeleteByQueryRequest request, ActionListener listener, + ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, threadPool, request, listener, scriptService, clusterState); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 33aca028351..96f9061c216 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -28,6 +28,11 @@ import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.reactor.IOReactorConfig; import org.apache.http.message.BasicHeader; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.MinimizationOperations; +import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BackoffPolicy; @@ -43,15 +48,13 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.http.HttpInfo; -import org.elasticsearch.http.HttpServer; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.TTLFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; @@ -65,11 +68,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Function; @@ -87,31 +88,35 @@ public class TransportReindexAction extends HandledTransportAction remoteWhitelist; - private final HttpServer httpServer; + private final CharacterRunAutomaton remoteWhitelist; @Inject public TransportReindexAction(Settings settings, ThreadPool threadPool, 
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService, - AutoCreateIndex autoCreateIndex, Client client, TransportService transportService, @Nullable HttpServer httpServer) { + AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) { super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ReindexRequest::new); this.clusterService = clusterService; this.scriptService = scriptService; this.autoCreateIndex = autoCreateIndex; this.client = client; - remoteWhitelist = new HashSet<>(REMOTE_CLUSTER_WHITELIST.get(settings)); - this.httpServer = httpServer; + remoteWhitelist = buildRemoteWhitelist(REMOTE_CLUSTER_WHITELIST.get(settings)); } @Override protected void doExecute(Task task, ReindexRequest request, ActionListener listener) { - checkRemoteWhitelist(request.getRemoteInfo()); - ClusterState state = clusterService.state(); - validateAgainstAliases(request.getSearchRequest(), request.getDestination(), request.getRemoteInfo(), indexNameExpressionResolver, - autoCreateIndex, state); - ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); - new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start(); + if (request.getSlices() > 1) { + ReindexParallelizationHelper.startSlices(client, taskManager, ReindexAction.INSTANCE, clusterService.localNode().getId(), + (ParentBulkByScrollTask) task, request, listener); + } else { + checkRemoteWhitelist(remoteWhitelist, request.getRemoteInfo()); + ClusterState state = clusterService.state(); + validateAgainstAliases(request.getSearchRequest(), request.getDestination(), request.getRemoteInfo(), + indexNameExpressionResolver, autoCreateIndex, state); + ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); + new AsyncIndexBySearchAction((WorkingBulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, + state).start(); + } } @Override @@ -119,31 +124,35 @@ public class TransportReindexAction extends HandledTransportAction whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) { - if (remoteInfo == null) return; String check = remoteInfo.getHost() + ':' + remoteInfo.getPort(); - if (whitelist.contains(check)) return; - /* - * For testing we support the key "myself" to allow connecting to the local node. We can't just change the setting to include the - * local node because it is intentionally not a dynamic setting for security purposes. We can't use something like "localhost:9200" - * because we don't know up front which port we'll get because the tests bind to port 0. Instead we try to resolve it here, taking - * "myself" to mean "my published http address". - */ - if (whitelist.contains("myself") && publishAddress != null && publishAddress.toString().equals(check)) { + if (whitelist.run(check)) { return; } throw new IllegalArgumentException('[' + check + "] not whitelisted in " + REMOTE_CLUSTER_WHITELIST.getKey()); } + /** + * Build the {@link CharacterRunAutomaton} that represents the reindex-from-remote whitelist and make sure that it doesn't whitelist + * the world. 
+ */ + static CharacterRunAutomaton buildRemoteWhitelist(List<String> whitelist) { + if (whitelist.isEmpty()) { + return new CharacterRunAutomaton(Automata.makeEmpty()); + } + Automaton automaton = Regex.simpleMatchToAutomaton(whitelist.toArray(Strings.EMPTY_ARRAY)); + automaton = MinimizationOperations.minimize(automaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES); + if (Operations.isTotal(automaton)) { + throw new IllegalArgumentException("Refusing to start because whitelist " + whitelist + " accepts all addresses. " + + "This would allow users to reindex-from-remote any URL they like, effectively having Elasticsearch make HTTP GETs " + + "for them."); + } + return new CharacterRunAutomaton(automaton); + } + /** * Throws an ActionRequestValidationException if the request tries to index * back into the same index or into an index that points to two indexes. @@ -226,9 +235,9 @@ public class TransportReindexAction extends HandledTransportAction { private List<Thread> createdThreads = emptyList(); - public AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, - ReindexRequest request, ActionListener<BulkIndexByScrollResponse> listener, - ScriptService scriptService, ClusterState clusterState) { + public AsyncIndexBySearchAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, ReindexRequest request, ActionListener<BulkIndexByScrollResponse> listener, + ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, threadPool, request, listener, scriptService, clusterState); } @@ -347,7 +356,7 @@ public class TransportReindexAction extends HandledTransportAction params) { super(task, scriptService, script, params); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java index 89af3bd39bd..9f27b521e9b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java @@ -19,16 +19,19 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -37,18 +40,38 @@ import java.io.IOException; import java.util.List; public class TransportRethrottleAction extends TransportTasksAction { + private final Client client; + @Inject public TransportRethrottleAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + TransportService transportService, ActionFilters actionFilters,
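To see the whitelist automaton in action in isolation, here is a minimal sketch using the Lucene classes imported above; Automata.makeString stands in for the union of wildcard patterns that Regex.simpleMatchToAutomaton would produce:

import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.Operations;

public class WhitelistSketch {
    public static void main(String[] args) {
        // Whitelist a single host:port pair and test candidates against it.
        CharacterRunAutomaton whitelist = new CharacterRunAutomaton(Automata.makeString("otherhost:9200"));
        System.out.println(whitelist.run("otherhost:9200")); // true
        System.out.println(whitelist.run("evilhost:9200"));  // false
        // A match-everything automaton is what buildRemoteWhitelist refuses to accept.
        System.out.println(Operations.isTotal(Automata.makeAnyString())); // true
    }
}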
IndexNameExpressionResolver indexNameExpressionResolver, + Client client) { super(settings, RethrottleAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, RethrottleRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT); + this.client = client; } @Override - protected TaskInfo taskOperation(RethrottleRequest request, BulkByScrollTask task) { - // Apply the new throttle and fetch status of the task. The user might not want that status but they likely do and it is cheap. - task.rethrottle(request.getRequestsPerSecond()); - return task.taskInfo(clusterService.localNode(), true); + protected void taskOperation(RethrottleRequest request, BulkByScrollTask task, ActionListener listener) { + rethrottle(clusterService.localNode().getId(), client, task, request.getRequestsPerSecond(), listener); + } + + static void rethrottle(String localNodeId, Client client, BulkByScrollTask task, float newRequestsPerSecond, + ActionListener listener) { + int runningSubTasks = task.runningSliceSubTasks(); + if (runningSubTasks == 0) { + // Nothing to do, all sub tasks are done + task.rethrottle(newRequestsPerSecond); + listener.onResponse(task.taskInfo(localNodeId, true)); + return; + } + RethrottleRequest subRequest = new RethrottleRequest(); + subRequest.setRequestsPerSecond(newRequestsPerSecond / runningSubTasks); + subRequest.setParentTaskId(new TaskId(localNodeId, task.getId())); + client.execute(RethrottleAction.INSTANCE, subRequest, ActionListener.wrap(r -> { + r.rethrowFailures("Rethrottle"); + listener.onResponse(task.getInfoGivenSliceInfo(localNodeId, r.getTasks())); + }, listener::onFailure)); } @Override diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 0f4bf5695d1..d8ca0441023 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -66,9 +66,15 @@ public class TransportUpdateByQueryAction extends HandledTransportAction listener) { - ClusterState state = clusterService.state(); - ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); - new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start(); + if (request.getSlices() > 1) { + ReindexParallelizationHelper.startSlices(client, taskManager, UpdateByQueryAction.INSTANCE, clusterService.localNode().getId(), + (ParentBulkByScrollTask) task, request, listener); + } else { + ClusterState state = clusterService.state(); + ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); + new AsyncIndexBySearchAction((WorkingBulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, + state).start(); + } } @Override @@ -81,9 +87,9 @@ public class TransportUpdateByQueryAction extends HandledTransportAction { - public AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, - UpdateByQueryRequest request, ActionListener listener, - ScriptService scriptService, ClusterState clusterState) { + public AsyncIndexBySearchAction(WorkingBulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, 
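A worked example of the rethrottle split above, as plain arithmetic: a parent task with five running slice sub-tasks that is rethrottled to 1000 requests per second sends each sub-task a request for 1000 / 5 = 200 requests per second.

public class RethrottleSplitMath {
    public static void main(String[] args) {
        float newRequestsPerSecond = 1000f;
        int runningSubTasks = 5;
        // Each running slice gets an equal share of the new rate.
        System.out.println(newRequestsPerSecond / runningSubTasks); // 200.0
    }
}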
UpdateByQueryRequest request, ActionListener listener, + ScriptService scriptService, ClusterState clusterState) { super(task, logger, client, threadPool, request, listener, scriptService, clusterState); } @@ -120,7 +126,7 @@ public class TransportUpdateByQueryAction extends HandledTransportAction params) { super(task, scriptService, script, params); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index 3401ce4582b..56ba3230133 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -43,7 +44,11 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest delayedPrepareBulkRequestReference = new AtomicReference<>(); + + public WorkingBulkByScrollTask(long id, String type, String action, String description, TaskId parentTask, Integer sliceId, + float requestsPerSecond) { + super(id, type, action, description, parentTask); + this.sliceId = sliceId; + setRequestsPerSecond(requestsPerSecond); + } + + @Override + public Status getStatus() { + return new Status(sliceId, total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), + noops.get(), bulkRetries.get(), searchRetries.get(), timeValueNanos(throttledNanos.get()), getRequestsPerSecond(), + getReasonCancelled(), throttledUntil()); + } + + @Override + protected void onCancelled() { + // Drop the throttle to 0, immediately rescheduling all outstanding tasks so the task will wake up and cancel itself. + rethrottle(0); + } + + @Override + int runningSliceSubTasks() { + return 0; + } + + @Override + public TaskInfo getInfoGivenSliceInfo(String localNodeId, List sliceInfo) { + throw new UnsupportedOperationException("This is only supported by " + ParentBulkByScrollTask.class.getName() + "."); + } + + TimeValue throttledUntil() { + DelayedPrepareBulkRequest delayed = delayedPrepareBulkRequestReference.get(); + if (delayed == null) { + return timeValueNanos(0); + } + if (delayed.future == null) { + return timeValueNanos(0); + } + return timeValueNanos(max(0, delayed.future.getDelay(TimeUnit.NANOSECONDS))); + } + + void setTotal(long totalHits) { + total.set(totalHits); + } + + void countBatch() { + batch.incrementAndGet(); + } + + void countNoop() { + noops.incrementAndGet(); + } + + @Override + public long getCreated() { + return created.get(); + } + + void countCreated() { + created.incrementAndGet(); + } + + @Override + public long getUpdated() { + return updated.get(); + } + + void countUpdated() { + updated.incrementAndGet(); + } + + @Override + public long getDeleted() { + return deleted.get(); + } + + void countDeleted() { + deleted.incrementAndGet(); + } + + void countVersionConflict() { + versionConflicts.incrementAndGet(); + } + + void countBulkRetry() { + bulkRetries.incrementAndGet(); + } + + void countSearchRetry() { + searchRetries.incrementAndGet(); + } + + float getRequestsPerSecond() { + return requestsPerSecond; + } + + /** + * Schedule prepareBulkRequestRunnable to run after some delay. 
This is where throttling plugs into reindexing so the request can be + * rescheduled over and over again. + */ + void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue lastBatchStartTime, int lastBatchSize, + AbstractRunnable prepareBulkRequestRunnable) { + // Synchronize so we are less likely to schedule the same request twice. + synchronized (delayedPrepareBulkRequestReference) { + TimeValue delay = throttleWaitTime(lastBatchStartTime, lastBatchSize); + delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), + delay, new RunOnce(prepareBulkRequestRunnable))); + } + } + + TimeValue throttleWaitTime(TimeValue lastBatchStartTime, int lastBatchSize) { + long earliestNextBatchStartTime = lastBatchStartTime.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); + return timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())); + } + + /** + * How many nanoseconds should a batch of lastBatchSize have taken if it were perfectly throttled? Package private for testing. + */ + float perfectlyThrottledBatchTime(int lastBatchSize) { + if (requestsPerSecond == Float.POSITIVE_INFINITY) { + return 0; + } + // requests + // ------------------- == seconds + // requests per second + float targetBatchTimeInSeconds = lastBatchSize / requestsPerSecond; + // nanoseconds per second * seconds == nanoseconds + return TimeUnit.SECONDS.toNanos(1) * targetBatchTimeInSeconds; + } + + private void setRequestsPerSecond(float requestsPerSecond) { + this.requestsPerSecond = requestsPerSecond; + } + + @Override + void rethrottle(float newRequestsPerSecond) { + synchronized (delayedPrepareBulkRequestReference) { + if (logger.isDebugEnabled()) { + logger.debug("[{}]: Rethrottling to [{}] requests per second", getId(), newRequestsPerSecond); + } + setRequestsPerSecond(newRequestsPerSecond); + + DelayedPrepareBulkRequest delayedPrepareBulkRequest = this.delayedPrepareBulkRequestReference.get(); + if (delayedPrepareBulkRequest == null) { + if (logger.isDebugEnabled()) { + logger.debug("[{}]: Skipping rescheduling because there is no scheduled task", getId()); + } + // No request has been queued yet so nothing to reschedule. + return; + } + + this.delayedPrepareBulkRequestReference.set(delayedPrepareBulkRequest.rethrottle(newRequestsPerSecond)); + } + } + + class DelayedPrepareBulkRequest { + private final ThreadPool threadPool; + private final AbstractRunnable command; + private final float requestsPerSecond; + private final ScheduledFuture<?> future; + + DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) { + this.threadPool = threadPool; + this.requestsPerSecond = requestsPerSecond; + this.command = command; + this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + throttledNanos.addAndGet(delay.nanos()); + command.run(); + } + + @Override + public void onFailure(Exception e) { + command.onFailure(e); + } + }); + } + + DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { + if (newRequestsPerSecond != 0 && newRequestsPerSecond < requestsPerSecond) { + /* + * The user is attempting to slow the request down. We'll let the change in throttle take effect the next time we delay + * prepareBulkRequest. We can't just reschedule the request further out in the future because the bulk context might time out.
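To make the throttling math above concrete: a batch of 100 documents throttled to 20 requests per second should have taken 100 / 20 = 5 seconds, so the next batch is delayed until five seconds after the last batch started. A standalone restatement of the formula:

import java.util.concurrent.TimeUnit;

public class ThrottleMathSketch {
    static float perfectlyThrottledBatchTime(int lastBatchSize, float requestsPerSecond) {
        if (requestsPerSecond == Float.POSITIVE_INFINITY) {
            return 0; // unthrottled batches should take no time at all
        }
        float targetBatchTimeInSeconds = lastBatchSize / requestsPerSecond;
        return TimeUnit.SECONDS.toNanos(1) * targetBatchTimeInSeconds;
    }

    public static void main(String[] args) {
        System.out.println(perfectlyThrottledBatchTime(100, 20f)); // 5.0E9 nanoseconds
    }
}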
+ */ + if (logger.isDebugEnabled()) { + logger.debug("[{}]: Skipping rescheduling because the new throttle [{}] is slower than the old one [{}].", getId(), + newRequestsPerSecond, requestsPerSecond); + } + return this; + } + + long remainingDelay = future.getDelay(TimeUnit.NANOSECONDS); + // Actually reschedule the task + if (false == FutureUtils.cancel(future)) { + // Couldn't cancel, probably because the task has finished or been scheduled. Either way we have nothing to do here. + if (logger.isDebugEnabled()) { + logger.debug("[{}]: Skipping rescheduling because we couldn't cancel the task.", getId()); + } + return this; + } + + /* + * Strangely enough getting here doesn't mean that you actually cancelled the request, just that you probably did. If you stress + * test it you'll find that requests sneak through. So each request is given a runOnce boolean to prevent that. + */ + TimeValue newDelay = newDelay(remainingDelay, newRequestsPerSecond); + if (logger.isDebugEnabled()) { + logger.debug("[{}]: Rescheduling for [{}] in the future.", getId(), newDelay); + } + return new DelayedPrepareBulkRequest(threadPool, requestsPerSecond, newDelay, command); + } + + /** + * Scale back remaining delay to fit the new delay. + */ + TimeValue newDelay(long remainingDelay, float newRequestsPerSecond) { + if (remainingDelay < 0 || newRequestsPerSecond == 0) { + return timeValueNanos(0); + } + return timeValueNanos(round(remainingDelay * requestsPerSecond / newRequestsPerSecond)); + } + } + + /** + * Runnable that can only be run one time. This is paranoia to prevent furious rethrottling from running the command multiple times. + */ + private static class RunOnce extends AbstractRunnable { + private final AtomicBoolean hasRun = new AtomicBoolean(false); + private final AbstractRunnable delegate; + + public RunOnce(AbstractRunnable delegate) { + this.delegate = delegate; + } + + @Override + protected void doRun() throws Exception { + if (hasRun.compareAndSet(false, true)) { + delegate.run(); + } + } + + @Override + public void onFailure(Exception e) { + delegate.onFailure(e); + } + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index b2700618f02..9ecb4700f58 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -86,9 +86,13 @@ final class RemoteRequestBuilders { for (int i = 1; i < searchRequest.source().sorts().size(); i++) { sorts.append(',').append(sortToUri(searchRequest.source().sorts().get(i))); } - params.put("sorts", sorts.toString()); + params.put("sort", sorts.toString()); } } + if (remoteVersion.before(Version.V_2_0_0)) { + // Versions before 2.0.0 need prompting to return interesting fields. Note that timestamp isn't available at all....
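The version check above, and the stored-fields parameter rename just below, both reduce to picking request parameters from the remote's version. A sketch of that selection, with a bare major version standing in for Elasticsearch's Version class (the real check is against 5.0.0-alpha4; the bare major elides that boundary):

public class RemoteParamsSketch {
    static String storedFieldsParamName(int remoteMajorVersion) {
        // Remotes older than 5.x called the parameter "fields".
        return remoteMajorVersion < 5 ? "fields" : "stored_fields";
    }

    public static void main(String[] args) {
        System.out.println(storedFieldsParamName(1)); // fields
        System.out.println(storedFieldsParamName(5)); // stored_fields
    }
}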
+ searchRequest.source().storedField("_parent").storedField("_routing").storedField("_ttl"); + } if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().fieldNames().isEmpty()) { StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().fieldNames().get(0)); for (int i = 1; i < searchRequest.source().storedFields().fieldNames().size(); i++) { @@ -97,6 +101,8 @@ final class RemoteRequestBuilders { String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? "fields" : "stored_fields"; params.put(storedFieldsParamName, fields.toString()); } + // We always want the _source document and this will force it to be returned. + params.put("_source", "true"); return params; } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java index 7ecec0aa19b..4583e4c8c05 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java @@ -83,10 +83,28 @@ final class RemoteResponseParsers { throw new ParsingException(p.getTokenLocation(), "[hit] failed to parse [_source]", e); } }, new ParseField("_source")); - HIT_PARSER.declareString(BasicHit::setRouting, new ParseField("_routing")); - HIT_PARSER.declareString(BasicHit::setParent, new ParseField("_parent")); - HIT_PARSER.declareLong(BasicHit::setTTL, new ParseField("_ttl")); + ParseField routingField = new ParseField("_routing"); + ParseField parentField = new ParseField("_parent"); + ParseField ttlField = new ParseField("_ttl"); + HIT_PARSER.declareString(BasicHit::setRouting, routingField); + HIT_PARSER.declareString(BasicHit::setParent, parentField); + HIT_PARSER.declareLong(BasicHit::setTTL, ttlField); HIT_PARSER.declareLong(BasicHit::setTimestamp, new ParseField("_timestamp")); + // Pre-2.0.0 parent and routing come back in "fields" + class Fields { + String routing; + String parent; + long ttl; + } + ObjectParser fieldsParser = new ObjectParser<>("fields", Fields::new); + HIT_PARSER.declareObject((hit, fields) -> { + hit.setRouting(fields.routing); + hit.setParent(fields.parent); + hit.setTTL(fields.ttl); + }, fieldsParser, new ParseField("fields")); + fieldsParser.declareString((fields, routing) -> fields.routing = routing, routingField); + fieldsParser.declareString((fields, parent) -> fields.parent = parent, parentField); + fieldsParser.declareLong((fields, ttl) -> fields.ttl = ttl, ttlField); } /** diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 207948c9215..601f46e901b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex.remote; +import org.apache.http.ContentTooLongException; import org.apache.http.HttpEntity; import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.Logger; @@ -184,6 +185,9 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { } e = wrapExceptionToPreserveStatus(re.getResponse().getStatusLine().getStatusCode(), 
re.getResponse().getEntity(), re); + } else if (e instanceof ContentTooLongException) { + e = new IllegalArgumentException( + "Remote responded with a chunk that was too large. Use a smaller batch size.", e); } fail.accept(e); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java index 66c636f4f1d..3df61f36915 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java @@ -32,12 +32,12 @@ public abstract class AbstractAsyncBulkIndexByScrollActionTestCase< Response extends BulkIndexByScrollResponse> extends ESTestCase { protected ThreadPool threadPool; - protected BulkByScrollTask task; + protected WorkingBulkByScrollTask task; @Before public void setupForTest() { threadPool = new TestThreadPool(getTestName()); - task = new BulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID, 0); + task = new WorkingBulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID, null, 0); } @After diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestTestCase.java new file mode 100644 index 00000000000..86b36f11a97 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestTestCase.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; + +/** + * Shared superclass for testing reindex and friends. In particular it makes sure to test the slice features. 
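The new test below pins down how forSlice splits a parent request across workers: requests_per_second and size divide evenly, each slice request reports a single worker, and only the parent stores results. The division as plain arithmetic:

public class SliceSplitMath {
    public static void main(String[] args) {
        int slices = 5;
        float requestsPerSecond = 10f;
        int size = 1000;
        System.out.println(requestsPerSecond / slices); // 2.0 requests per second per worker
        System.out.println(size / slices);              // 200 documents per worker
    }
}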
+ */ +public abstract class AbstractBulkByScrollRequestTestCase> extends ESTestCase { + public void testForSlice() { + R original = newRequest(); + original.setAbortOnVersionConflict(randomBoolean()); + original.setRefresh(randomBoolean()); + original.setTimeout(parseTimeValue(randomPositiveTimeValue(), "timeout")); + original.setWaitForActiveShards( + randomFrom(ActiveShardCount.ALL, ActiveShardCount.NONE, ActiveShardCount.ONE, ActiveShardCount.DEFAULT)); + original.setRetryBackoffInitialTime(parseTimeValue(randomPositiveTimeValue(), "retry_backoff_initial_time")); + original.setMaxRetries(between(0, 1000)); + original.setSlices(between(2, 1000)); + original.setRequestsPerSecond( + randomBoolean() ? Float.POSITIVE_INFINITY : randomValueOtherThanMany(r -> r < 0, ESTestCase::randomFloat)); + original.setSize(randomBoolean() ? AbstractBulkByScrollRequest.SIZE_ALL_MATCHES : between(0, Integer.MAX_VALUE)); + + TaskId slicingTask = new TaskId(randomAsciiOfLength(5), randomLong()); + SearchRequest sliceRequest = new SearchRequest(); + R forSliced = original.forSlice(slicingTask, sliceRequest); + assertEquals(original.isAbortOnVersionConflict(), forSliced.isAbortOnVersionConflict()); + assertEquals(original.isRefresh(), forSliced.isRefresh()); + assertEquals(original.getTimeout(), forSliced.getTimeout()); + assertEquals(original.getWaitForActiveShards(), forSliced.getWaitForActiveShards()); + assertEquals(original.getRetryBackoffInitialTime(), forSliced.getRetryBackoffInitialTime()); + assertEquals(original.getMaxRetries(), forSliced.getMaxRetries()); + assertEquals("only the parent task should store results", false, forSliced.getShouldStoreResult()); + assertEquals("slice requests always have a single worker", 1, forSliced.getSlices()); + assertEquals("requests_per_second is split between all workers", original.getRequestsPerSecond() / original.getSlices(), + forSliced.getRequestsPerSecond(), Float.MIN_NORMAL); + assertEquals("size is split evenly between all workers", original.getSize() == AbstractBulkByScrollRequest.SIZE_ALL_MATCHES + ? 
AbstractBulkByScrollRequest.SIZE_ALL_MATCHES : original.getSize() / original.getSlices(), forSliced.getSize()); + assertEquals(slicingTask, forSliced.getParentTask()); + + extraForSliceAssertions(original, forSliced); + } + + protected abstract R newRequest(); + protected abstract void extraRandomizationForSlice(R original); + protected abstract void extraForSliceAssertions(R original, R forSliced); +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 4bd4cb47e2f..35c3f235cd8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -47,7 +49,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Client; @@ -56,7 +57,6 @@ import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -120,7 +120,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { private PlainActionFuture listener; private String scrollId; private TaskManager taskManager; - private BulkByScrollTask testTask; + private WorkingBulkByScrollTask testTask; private Map expectedHeaders = new HashMap<>(); private DiscoveryNode localNode; private TaskId taskId; @@ -134,14 +134,14 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { listener = new PlainActionFuture<>(); scrollId = null; taskManager = new TaskManager(Settings.EMPTY); - testTask = (BulkByScrollTask) taskManager.register("don'tcare", "hereeither", testRequest); + testTask = (WorkingBulkByScrollTask) taskManager.register("don'tcare", "hereeither", testRequest); // Fill the context with something random so we can make sure we inherited it appropriately. 
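The switch just below fabricates bulk item responses and tallies them into the task status; condensed, the mapping is CREATE -> created, INDEX/UPDATE -> updated, DELETE -> deleted. A sketch of that tally, with a hypothetical OpType enum standing in for DocWriteRequest.OpType:

public class OpTypeTallySketch {
    enum OpType { CREATE, INDEX, UPDATE, DELETE }

    public static void main(String[] args) {
        int created = 0, updated = 0, deleted = 0;
        for (OpType opType : new OpType[] { OpType.CREATE, OpType.INDEX, OpType.DELETE }) {
            switch (opType) {
                case CREATE:
                    created++;
                    break;
                case INDEX:
                case UPDATE:
                    updated++;
                    break;
                case DELETE:
                    deleted++;
                    break;
            }
        }
        System.out.println(created + " created, " + updated + " updated, " + deleted + " deleted");
    }
}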
expectedHeaders.clear(); expectedHeaders.put(randomSimpleString(random()), randomSimpleString(random())); threadPool.getThreadContext().newStoredContext(); threadPool.getThreadContext().putHeader(expectedHeaders); - localNode = new DiscoveryNode("thenode", new LocalTransportAddress("dead.end:666"), emptyMap(), emptySet(), Version.CURRENT); + localNode = new DiscoveryNode("thenode", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); taskId = new TaskId(localNode.getId(), testTask.getId()); } @@ -257,39 +257,38 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { BulkItemResponse[] responses = new BulkItemResponse[randomIntBetween(0, 100)]; for (int i = 0; i < responses.length; i++) { ShardId shardId = new ShardId(new Index("name", "uid"), 0); - String opType; if (rarely()) { - opType = randomSimpleString(random()); versionConflicts++; - responses[i] = new BulkItemResponse(i, opType, new Failure(shardId.getIndexName(), "type", "id" + i, + responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()), + new Failure(shardId.getIndexName(), "type", "id" + i, new VersionConflictEngineException(shardId, "type", "id", "test"))); continue; } boolean createdResponse; + DocWriteRequest.OpType opType; switch (randomIntBetween(0, 2)) { case 0: - opType = randomFrom("index", "create"); createdResponse = true; + opType = DocWriteRequest.OpType.CREATE; created++; break; case 1: - opType = randomFrom("index", "create"); createdResponse = false; + opType = randomFrom(DocWriteRequest.OpType.INDEX, DocWriteRequest.OpType.UPDATE); updated++; break; case 2: - opType = "delete"; createdResponse = false; + opType = DocWriteRequest.OpType.DELETE; deleted++; break; default: throw new RuntimeException("Bad scenario"); } - responses[i] = - new BulkItemResponse( - i, - opType, - new IndexResponse(shardId, "type", "id" + i, randomInt(20), randomInt(), createdResponse)); + responses[i] = new BulkItemResponse( + i, + opType, + new IndexResponse(shardId, "type", "id" + i, randomInt(20), randomInt(), createdResponse)); } new DummyAbstractAsyncBulkByScrollAction().onBulkResponse(timeValueNanos(System.nanoTime()), new BulkResponse(responses, 0)); assertEquals(versionConflicts, testTask.getStatus().getVersionConflicts()); @@ -363,7 +362,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testBulkFailuresAbortRequest() throws Exception { Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); - BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong()); + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] + {new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure)}, randomLong()); action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse); BulkIndexByScrollResponse response = listener.get(); assertThat(response.getBulkFailures(), contains(failure)); @@ -687,7 +687,12 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { private static class DummyAbstractBulkByScrollRequest extends AbstractBulkByScrollRequest { public DummyAbstractBulkByScrollRequest(SearchRequest searchRequest) { - super(searchRequest); + super(searchRequest, true); + } + + @Override + DummyAbstractBulkByScrollRequest forSlice(TaskId slicingTask, SearchRequest slice) { + throw new UnsupportedOperationException(); } @Override @@ -769,13 +774,11 @@ 
public class AsyncBulkByScrollActionTests extends ESTestCase { } BulkItemResponse[] responses = new BulkItemResponse[bulk.requests().size()]; for (int i = 0; i < bulk.requests().size(); i++) { - ActionRequest item = bulk.requests().get(i); - String opType; + DocWriteRequest item = bulk.requests().get(i); DocWriteResponse response; - ShardId shardId = new ShardId(new Index(((ReplicationRequest) item).index(), "uuid"), 0); + ShardId shardId = new ShardId(new Index(item.index(), "uuid"), 0); if (item instanceof IndexRequest) { IndexRequest index = (IndexRequest) item; - opType = index.opType().lowercase(); response = new IndexResponse( shardId, @@ -786,12 +789,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { true); } else if (item instanceof UpdateRequest) { UpdateRequest update = (UpdateRequest) item; - opType = "update"; response = new UpdateResponse(shardId, update.type(), update.id(), - randomIntBetween(0, Integer.MAX_VALUE), DocWriteResponse.Result.CREATED); + randomIntBetween(0, Integer.MAX_VALUE), Result.CREATED); } else if (item instanceof DeleteRequest) { DeleteRequest delete = (DeleteRequest) item; - opType = "delete"; response = new DeleteResponse( shardId, @@ -804,10 +805,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { throw new RuntimeException("Unknown request: " + item); } if (i == toReject) { - responses[i] = new BulkItemResponse(i, opType, + responses[i] = new BulkItemResponse(i, item.opType(), new Failure(response.getIndex(), response.getType(), response.getId(), new EsRejectedExecutionException())); } else { - responses[i] = new BulkItemResponse(i, opType, response); + responses[i] = new BulkItemResponse(i, item.opType(), response); } } listener.onResponse((Response) new BulkResponse(responses, 1)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java index 72c650805ce..b5f4c16f3dd 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java @@ -20,281 +20,143 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.junit.Before; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.Delayed; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.Arrays; +import static java.lang.Math.min; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; -import static 
org.elasticsearch.common.unit.TimeValue.timeValueSeconds; -import static org.hamcrest.Matchers.both; -import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.not; public class BulkByScrollTaskTests extends ESTestCase { - private BulkByScrollTask task; - - @Before - public void createTask() { - task = new BulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID, Float.POSITIVE_INFINITY); - } - - public void testBasicData() { - assertEquals(1, task.getId()); - assertEquals("test_type", task.getType()); - assertEquals("test_action", task.getAction()); - } - - public void testProgress() { - long created = 0; - long updated = 0; - long deleted = 0; - long versionConflicts = 0; - long noops = 0; - int batch = 0; - BulkByScrollTask.Status status = task.getStatus(); - assertEquals(0, status.getTotal()); - assertEquals(created, status.getCreated()); - assertEquals(updated, status.getUpdated()); - assertEquals(deleted, status.getDeleted()); - assertEquals(versionConflicts, status.getVersionConflicts()); - assertEquals(batch, status.getBatches()); - assertEquals(noops, status.getNoops()); - - long totalHits = randomIntBetween(10, 1000); - task.setTotal(totalHits); - for (long p = 0; p < totalHits; p++) { - status = task.getStatus(); - assertEquals(totalHits, status.getTotal()); - assertEquals(created, status.getCreated()); - assertEquals(updated, status.getUpdated()); - assertEquals(deleted, status.getDeleted()); - assertEquals(versionConflicts, status.getVersionConflicts()); - assertEquals(batch, status.getBatches()); - assertEquals(noops, status.getNoops()); - - if (randomBoolean()) { - created++; - task.countCreated(); - } else if (randomBoolean()) { - updated++; - task.countUpdated(); - } else { - deleted++; - task.countDeleted(); - } - - if (rarely()) { - versionConflicts++; - task.countVersionConflict(); - } - - if (rarely()) { - batch++; - task.countBatch(); - } - - if (rarely()) { - noops++; - task.countNoop(); - } - } - status = task.getStatus(); - assertEquals(totalHits, status.getTotal()); - assertEquals(created, status.getCreated()); - assertEquals(updated, status.getUpdated()); - assertEquals(deleted, status.getDeleted()); - assertEquals(versionConflicts, status.getVersionConflicts()); - assertEquals(batch, status.getBatches()); - assertEquals(noops, status.getNoops()); - } - public void testStatusHatesNegatives() { - checkStatusNegatives(-1, 0, 0, 0, 0, 0, 0, 0, 0, "total"); - checkStatusNegatives(0, -1, 0, 0, 0, 0, 0, 0, 0, "updated"); - checkStatusNegatives(0, 0, -1, 0, 0, 0, 0, 0, 0, "created"); - checkStatusNegatives(0, 0, 0, -1, 0, 0, 0, 0, 0, "deleted"); - checkStatusNegatives(0, 0, 0, 0, -1, 0, 0, 0, 0, "batches"); - checkStatusNegatives(0, 0, 0, 0, 0, -1, 0, 0, 0, "versionConflicts"); - checkStatusNegatives(0, 0, 0, 0, 0, 0, -1, 0, 0, "noops"); - checkStatusNegatives(0, 0, 0, 0, 0, 0, 0, -1, 0, "bulkRetries"); - checkStatusNegatives(0, 0, 0, 0, 0, 0, 0, 0, -1, "searchRetries"); + checkStatusNegatives(-1 , 0, 0, 0, 0, 0, 0, 0, 0, 0, "sliceId"); + checkStatusNegatives(null, -1, 0, 0, 0, 0, 0, 0, 0, 0, "total"); + checkStatusNegatives(null, 0, -1, 0, 0, 0, 0, 0, 0, 0, "updated"); + checkStatusNegatives(null, 0, 0, -1, 0, 0, 0, 0, 0, 0, "created"); + checkStatusNegatives(null, 0, 0, 0, 
-1, 0, 0, 0, 0, 0, "deleted"); + checkStatusNegatives(null, 0, 0, 0, 0, -1, 0, 0, 0, 0, "batches"); + checkStatusNegatives(null, 0, 0, 0, 0, 0, -1, 0, 0, 0, "versionConflicts"); + checkStatusNegatives(null, 0, 0, 0, 0, 0, 0, -1, 0, 0, "noops"); + checkStatusNegatives(null, 0, 0, 0, 0, 0, 0, 0, -1, 0, "bulkRetries"); + checkStatusNegatives(null, 0, 0, 0, 0, 0, 0, 0, 0, -1, "searchRetries"); } /** * Build a task status with only some values. Used for testing negative values. */ - private void checkStatusNegatives(long total, long updated, long created, long deleted, int batches, long versionConflicts, - long noops, long bulkRetries, long searchRetries, String fieldName) { + private void checkStatusNegatives(Integer sliceId, long total, long updated, long created, long deleted, int batches, + long versionConflicts, long noops, long bulkRetries, long searchRetries, String fieldName) { TimeValue throttle = parseTimeValue(randomPositiveTimeValue(), "test"); TimeValue throttledUntil = parseTimeValue(randomPositiveTimeValue(), "test"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(total, updated, created, - deleted, batches, versionConflicts, noops, bulkRetries, searchRetries, throttle, 0f, null, throttledUntil)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(sliceId, total, updated, + created, deleted, batches, versionConflicts, noops, bulkRetries, searchRetries, throttle, 0f, null, throttledUntil)); assertEquals(e.getMessage(), fieldName + " must be greater than 0 but was [-1]"); } - /** - * Furiously rethrottles a delayed request to make sure that we never run it twice. - */ - public void testDelayAndRethrottle() throws IOException, InterruptedException { - List errors = new CopyOnWriteArrayList<>(); - AtomicBoolean done = new AtomicBoolean(); - int threads = between(1, 10); - CyclicBarrier waitForShutdown = new CyclicBarrier(threads); - - /* - * We never end up waiting this long because the test rethrottles over and over again, ratcheting down the delay a random amount - * each time. - */ - float originalRequestsPerSecond = (float) randomDoubleBetween(1, 10000, true); - task.rethrottle(originalRequestsPerSecond); - TimeValue maxDelay = timeValueSeconds(between(1, 5)); - assertThat(maxDelay.nanos(), greaterThanOrEqualTo(0L)); - int batchSizeForMaxDelay = (int) (maxDelay.seconds() * originalRequestsPerSecond); - ThreadPool threadPool = new TestThreadPool(getTestName()) { - @Override - public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { - assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos()))); - return super.schedule(delay, name, command); - } - }; - try { - task.delayPrepareBulkRequest(threadPool, timeValueNanos(System.nanoTime()), batchSizeForMaxDelay, new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - boolean oldValue = done.getAndSet(true); - if (oldValue) { - throw new RuntimeException("Ran twice oh no!"); - } - } - - @Override - public void onFailure(Exception e) { - errors.add(e); - } - }); - - // Rethrottle on a random number of threads, on of which is this thread. 
- Runnable test = () -> { - try { - int rethrottles = 0; - while (false == done.get()) { - float requestsPerSecond = (float) randomDoubleBetween(0, originalRequestsPerSecond * 2, true); - task.rethrottle(requestsPerSecond); - rethrottles += 1; - } - logger.info("Rethrottled [{}] times", rethrottles); - waitForShutdown.await(); - } catch (Exception e) { - errors.add(e); - } - }; - for (int i = 1; i < threads; i++) { - threadPool.generic().execute(test); - } - test.run(); - } finally { - // Other threads should finish up quickly as they are checking the same AtomicBoolean. - threadPool.shutdown(); - threadPool.awaitTermination(10, TimeUnit.SECONDS); - } - assertThat(errors, empty()); - } - - public void testDelayNeverNegative() throws IOException { - // Thread pool that returns a ScheduledFuture that claims to have a negative delay - ThreadPool threadPool = new TestThreadPool("test") { - public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { - return new ScheduledFuture() { - @Override - public long getDelay(TimeUnit unit) { - return -1; - } - - @Override - public int compareTo(Delayed o) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isCancelled() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isDone() { - throw new UnsupportedOperationException(); - } - - @Override - public Void get() throws InterruptedException, ExecutionException { - throw new UnsupportedOperationException(); - } - - @Override - public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - throw new UnsupportedOperationException(); - } - }; - } - }; - try { - // Have the task use the thread pool to delay a task that does nothing - task.delayPrepareBulkRequest(threadPool, timeValueSeconds(0), 1, new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - } - @Override - public void onFailure(Exception e) { - throw new UnsupportedOperationException(); - } - }); - // Even though the future returns a negative delay we just return 0 because the time is up. 
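As the deleted test's comment notes, a ScheduledFuture may report a negative remaining delay once its scheduled time has passed; throttledUntil clamps that to zero rather than reporting a negative throttle. The clamp in isolation:

public class DelayClampSketch {
    public static void main(String[] args) {
        long reportedDelayNanos = -1; // what the stubbed future above returns
        System.out.println(Math.max(0, reportedDelayNanos)); // 0: the time is up
    }
}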
- assertEquals(timeValueSeconds(0), task.getStatus().getThrottledUntil()); - } finally { - threadPool.shutdown(); - } - } - - public void testXContentRepresentationOfUnlimitedRequestsPerSecon() throws IOException { + public void testXContentRepresentationOfUnlimitedRequestsPerSecond() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder(); - task.getStatus().toXContent(builder, ToXContent.EMPTY_PARAMS); + BulkByScrollTask.Status status = new BulkByScrollTask.Status(null, 0, 0, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), + Float.POSITIVE_INFINITY, null, timeValueMillis(0)); + status.toXContent(builder, ToXContent.EMPTY_PARAMS); assertThat(builder.string(), containsString("\"requests_per_second\":-1")); } - public void testPerfectlyThrottledBatchTime() { - task.rethrottle(Float.POSITIVE_INFINITY); - assertThat((double) task.perfectlyThrottledBatchTime(randomInt()), closeTo(0f, 0f)); + public void testXContentRepresentationOfUnfinishedSlices() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder(); + BulkByScrollTask.Status completedStatus = new BulkByScrollTask.Status(2, 0, 0, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), + Float.POSITIVE_INFINITY, null, timeValueMillis(0)); + BulkByScrollTask.Status status = new BulkByScrollTask.Status( + Arrays.asList(null, null, new BulkByScrollTask.StatusOrException(completedStatus)), null); + status.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(builder.string(), containsString("\"slices\":[null,null,{\"slice_id\":2")); + } - int total = between(0, 1000000); - task.rethrottle(1); - assertThat((double) task.perfectlyThrottledBatchTime(total), - closeTo(TimeUnit.SECONDS.toNanos(total), TimeUnit.SECONDS.toNanos(1))); + public void testXContentRepresentationOfSliceFailures() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder(); + Exception e = new Exception(); + BulkByScrollTask.Status status = new BulkByScrollTask.Status(Arrays.asList(null, null, new BulkByScrollTask.StatusOrException(e)), + null); + status.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(builder.string(), containsString("\"slices\":[null,null,{\"type\":\"exception\"")); + } + + public void testMergeStatuses() { + BulkByScrollTask.StatusOrException[] statuses = new BulkByScrollTask.StatusOrException[between(2, 100)]; + boolean containsNullStatuses = randomBoolean(); + int mergedTotal = 0; + int mergedUpdated = 0; + int mergedCreated = 0; + int mergedDeleted = 0; + int mergedBatches = 0; + int mergedVersionConflicts = 0; + int mergedNoops = 0; + int mergedBulkRetries = 0; + int mergedSearchRetries = 0; + TimeValue mergedThrottled = timeValueNanos(0); + float mergedRequestsPerSecond = 0; + TimeValue mergedThrottledUntil = timeValueNanos(Integer.MAX_VALUE); + for (int i = 0; i < statuses.length; i++) { + if (containsNullStatuses && rarely()) { + continue; + } + int total = between(0, 10000); + int updated = between(0, total); + int created = between(0, total - updated); + int deleted = between(0, total - updated - created); + int batches = between(0, 10); + int versionConflicts = between(0, 100); + int noops = total - updated - created - deleted; + int bulkRetries = between(0, 100); + int searchRetries = between(0, 100); + TimeValue throttled = timeValueNanos(between(0, 10000)); + float requestsPerSecond = randomValueOtherThanMany(r -> r <= 0, () -> randomFloat()); + String reasonCancelled = randomBoolean() ? 
null : "test"; + TimeValue throttledUntil = timeValueNanos(between(0, 1000)); + statuses[i] = new BulkByScrollTask.StatusOrException(new BulkByScrollTask.Status(i, total, updated, created, deleted, batches, + versionConflicts, noops, bulkRetries, searchRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil)); + mergedTotal += total; + mergedUpdated += updated; + mergedCreated += created; + mergedDeleted += deleted; + mergedBatches += batches; + mergedVersionConflicts += versionConflicts; + mergedNoops += noops; + mergedBulkRetries += bulkRetries; + mergedSearchRetries += searchRetries; + mergedThrottled = timeValueNanos(mergedThrottled.nanos() + throttled.nanos()); + mergedRequestsPerSecond += requestsPerSecond; + mergedThrottledUntil = timeValueNanos(min(mergedThrottledUntil.nanos(), throttledUntil.nanos())); + } + String reasonCancelled = randomBoolean() ? randomAsciiOfLength(10) : null; + BulkByScrollTask.Status merged = new BulkByScrollTask.Status(Arrays.asList(statuses), reasonCancelled); + assertEquals(mergedTotal, merged.getTotal()); + assertEquals(mergedUpdated, merged.getUpdated()); + assertEquals(mergedCreated, merged.getCreated()); + assertEquals(mergedDeleted, merged.getDeleted()); + assertEquals(mergedBatches, merged.getBatches()); + assertEquals(mergedVersionConflicts, merged.getVersionConflicts()); + assertEquals(mergedNoops, merged.getNoops()); + assertEquals(mergedBulkRetries, merged.getBulkRetries()); + assertEquals(mergedSearchRetries, merged.getSearchRetries()); + assertEquals(mergedThrottled, merged.getThrottled()); + assertEquals(mergedRequestsPerSecond, merged.getRequestsPerSecond(), 0.0001f); + assertEquals(mergedThrottledUntil, merged.getThrottledUntil()); + assertEquals(reasonCancelled, merged.getReasonCancelled()); + } + + public void testUnknownVersions() { + assertThat("5.1.0 has been defined, remove the temporary constant", VersionUtils.allVersions(), + not(hasItem(BulkByScrollTask.V_5_1_0_UNRELEASED))); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java index c0c06b14d55..7c8ad56a48a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java @@ -23,6 +23,9 @@ import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; +import java.util.Collection; + +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -39,6 +42,7 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher versionConflictsMatcher = equalTo(0L); private Matcher failuresMatcher = equalTo(0); private Matcher reasonCancelledMatcher = nullValue(String.class); + private Matcher> slicesMatcher = empty(); public BulkIndexByScrollResponseMatcher created(Matcher createdMatcher) { this.createdMatcher = createdMatcher; @@ -117,6 +121,14 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher> slicesMatcher) { + this.slicesMatcher = slicesMatcher; + return this; + } + @Override protected boolean matchesSafely(BulkIndexByScrollResponse item) { return updatedMatcher.matches(item.getUpdated()) && @@ -125,7 +137,8 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher responses = new 
ArrayList<>(mergeCount); + int took = between(1000, 10000); + int tookIndex = between(0, mergeCount - 1); + List allBulkFailures = new ArrayList<>(); + List allSearchFailures = new ArrayList<>(); + boolean timedOut = false; + String reasonCancelled = rarely() ? randomAsciiOfLength(5) : null; + + for (int i = 0; i < mergeCount; i++) { + // One of the merged responses gets the expected value for took, the others get a smaller value + TimeValue thisTook = timeValueMillis(i == tookIndex ? took : between(0, took)); + // The actual status doesn't matter too much - we test merging those elsewhere + String thisReasonCancelled = rarely() ? randomAsciiOfLength(5) : null; + BulkByScrollTask.Status status = new BulkByScrollTask.Status(i, 0, 0, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), 0f, + thisReasonCancelled, timeValueMillis(0)); + List bulkFailures = frequently() ? emptyList() + : IntStream.range(0, between(1, 3)).mapToObj(j -> new BulkItemResponse.Failure("idx", "type", "id", new Exception())) + .collect(Collectors.toList()); + allBulkFailures.addAll(bulkFailures); + List searchFailures = frequently() ? emptyList() + : IntStream.range(0, between(1, 3)).mapToObj(j -> new SearchFailure(new Exception())).collect(Collectors.toList()); + allSearchFailures.addAll(searchFailures); + boolean thisTimedOut = rarely(); + timedOut |= thisTimedOut; + responses.add(new BulkIndexByScrollResponse(thisTook, status, bulkFailures, searchFailures, thisTimedOut)); + } + + BulkIndexByScrollResponse merged = new BulkIndexByScrollResponse(responses, reasonCancelled); + + assertEquals(timeValueMillis(took), merged.getTook()); + assertEquals(allBulkFailures, merged.getBulkFailures()); + assertEquals(allSearchFailures, merged.getSearchFailures()); + assertEquals(timedOut, merged.isTimedOut()); + assertEquals(reasonCancelled, merged.getReasonCancelled()); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 8da47f1eeaf..0ee39d75674 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -32,11 +32,10 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.ingest.IngestTestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskInfo; -import org.junit.BeforeClass; +import org.junit.Before; import java.util.ArrayList; import java.util.Collection; -import java.util.List; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -45,7 +44,6 @@ import java.util.stream.IntStream; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -60,9 +58,6 @@ public class CancelTests extends ReindexTestCase { protected static final String INDEX = "reindex-cancel-index"; protected static final String TYPE = "reindex-cancel-type"; - private static final int MIN_OPERATIONS = 2; - private static final int BLOCKING_OPERATIONS = 1; - // Semaphore used to allow & block indexing operations during the test private 
static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0); @@ -74,8 +69,8 @@ public class CancelTests extends ReindexTestCase { return plugins; } - @BeforeClass - public static void clearAllowedOperations() { + @Before + public void clearAllowedOperations() { ALLOWED_OPERATIONS.drainPermits(); } @@ -85,8 +80,8 @@ public class CancelTests extends ReindexTestCase { private void testCancel(String action, AbstractBulkByScrollRequestBuilder builder, CancelAssertion assertion) throws Exception { createIndex(INDEX); - // Total number of documents created for this test (~10 per primary shard) - int numDocs = getNumShards(INDEX).numPrimaries * 10; + // Total number of documents created for this test (~10 per primary shard per slice) + int numDocs = getNumShards(INDEX).numPrimaries * 10 * builder.request().getSlices(); ALLOWED_OPERATIONS.release(numDocs); indexRandom(true, false, true, IntStream.range(0, numDocs) @@ -100,43 +95,59 @@ public class CancelTests extends ReindexTestCase { // Scroll by 1 so that cancellation is easier to control builder.source().setSize(1); - // Allow a random number of the documents minus 1 - // to be modified by the reindex action - int numModifiedDocs = randomIntBetween(MIN_OPERATIONS, numDocs); - ALLOWED_OPERATIONS.release(numModifiedDocs - BLOCKING_OPERATIONS); + /* Allow a random number of the documents less the number of workers to be modified by the reindex action. That way at least one + * worker is blocked. */ + int numModifiedDocs = randomIntBetween(builder.request().getSlices() * 2, numDocs); + ALLOWED_OPERATIONS.release(numModifiedDocs - builder.request().getSlices()); // Now execute the reindex action... ListenableActionFuture future = builder.execute(); - // ... and waits for the indexing operation listeners to block + /* ... and wait for the indexing operation listeners to block. It is important to realize that some of the workers might have + * exhausted their slice while others might have quite a bit left to work on. We can't control that. */ awaitBusy(() -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0); // Status should show the task running - ListTasksResponse tasksList = client().admin().cluster().prepareListTasks().setActions(action).setDetailed(true).get(); - assertThat(tasksList.getNodeFailures(), empty()); - assertThat(tasksList.getTaskFailures(), empty()); - assertThat(tasksList.getTasks(), hasSize(1)); - BulkByScrollTask.Status status = (BulkByScrollTask.Status) tasksList.getTasks().get(0).getStatus(); + TaskInfo mainTask = findTaskToCancel(action, builder.request().getSlices()); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.getStatus(); assertNull(status.getReasonCancelled()); // Cancel the request while the reindex action is blocked by the indexing operation listeners. // This will prevent further requests from being sent. - List cancelledTasks = client().admin().cluster().prepareCancelTasks().setActions(action).get().getTasks(); - assertThat(cancelledTasks, hasSize(1)); + ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(mainTask.getTaskId()).get(); + cancelTasksResponse.rethrowFailures("Cancel"); + assertThat(cancelTasksResponse.getTasks(), hasSize(1)); - // The status should now show canceled. The request will still be in the list because it is still blocked. 
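// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: a self-contained sketch of the
// semaphore-gating pattern this test relies on. The semaphore starts with zero
// permits, each indexing operation must acquire a permit before it proceeds,
// and the test releases exactly as many permits as operations it wants to let
// through. Class and method names below are hypothetical.
// ---------------------------------------------------------------------------
import java.util.concurrent.Semaphore;

class SemaphoreGatingSketch {
    // Zero permits: every gated operation blocks until the test releases some.
    static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0);

    // Stand-in for the IndexingOperationListener hook the test installs.
    static void beforeIndexingOperation() throws InterruptedException {
        ALLOWED_OPERATIONS.acquire(); // takes one permit, or blocks until one is released
    }

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                beforeIndexingOperation();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        // hasQueuedThreads() turning true is how the test detects blocked workers.
        while (false == ALLOWED_OPERATIONS.hasQueuedThreads()) {
            Thread.sleep(10);
        }
        ALLOWED_OPERATIONS.release(1); // let exactly one operation through
        worker.join();
    }
}
// ---------------------------------------------------------------------------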
- tasksList = client().admin().cluster().prepareListTasks().setActions(action).setDetailed(true).get(); - assertThat(tasksList.getNodeFailures(), empty()); - assertThat(tasksList.getTaskFailures(), empty()); - assertThat(tasksList.getTasks(), hasSize(1)); - status = (BulkByScrollTask.Status) tasksList.getTasks().get(0).getStatus(); + // The status should now show canceled. The request will still be in the list because it is (or its children are) still blocked. + mainTask = client().admin().cluster().prepareGetTask(mainTask.getTaskId()).get().getTask().getTask(); + status = (BulkByScrollTask.Status) mainTask.getStatus(); assertEquals(CancelTasksRequest.DEFAULT_REASON, status.getReasonCancelled()); + if (builder.request().getSlices() > 1) { + boolean foundCancelled = false; + ListTasksResponse sliceList = client().admin().cluster().prepareListTasks().setParentTaskId(mainTask.getTaskId()) + .setDetailed(true).get(); + sliceList.rethrowFailures("Fetch slice tasks"); + for (TaskInfo slice: sliceList.getTasks()) { + BulkByScrollTask.Status sliceStatus = (BulkByScrollTask.Status) slice.getStatus(); + if (sliceStatus.getReasonCancelled() == null) continue; + assertEquals(CancelTasksRequest.DEFAULT_REASON, sliceStatus.getReasonCancelled()); + foundCancelled = true; + } + assertTrue("Didn't find at least one sub task that was cancelled", foundCancelled); + } - // Unblock the last operation - ALLOWED_OPERATIONS.release(BLOCKING_OPERATIONS); + // Unblock the last operations + ALLOWED_OPERATIONS.release(builder.request().getSlices()); // Checks that no more operations are executed - assertBusy(() -> assertTrue(ALLOWED_OPERATIONS.availablePermits() == 0 && ALLOWED_OPERATIONS.getQueueLength() == 0)); + assertBusy(() -> { + if (builder.request().getSlices() == 1) { + /* We can only be sure that we've drained all the permits if we only use a single worker. Otherwise some worker may have + * exhausted all of its documents before we blocked. 
*/ + assertEquals(0, ALLOWED_OPERATIONS.availablePermits()); + } + assertEquals(0, ALLOWED_OPERATIONS.getQueueLength()); + }); // And check the status of the response BulkIndexByScrollResponse response = future.get(); @@ -144,10 +155,30 @@ public class CancelTests extends ReindexTestCase { assertThat(response.getBulkFailures(), emptyIterable()); assertThat(response.getSearchFailures(), emptyIterable()); + if (builder.request().getSlices() > 1) { + // If we have more than one worker we might not have made all the modifications + numModifiedDocs -= ALLOWED_OPERATIONS.availablePermits(); + } flushAndRefresh(INDEX); assertion.assertThat(response, numDocs, numModifiedDocs); } + private TaskInfo findTaskToCancel(String actionName, int workerCount) { + ListTasksResponse tasks; + long start = System.nanoTime(); + do { + tasks = client().admin().cluster().prepareListTasks().setActions(actionName).setDetailed(true).get(); + tasks.rethrowFailures("Find tasks to cancel"); + for (TaskInfo taskInfo : tasks.getTasks()) { + // Skip tasks with a parent because those are children of the task we want to cancel + if (false == taskInfo.getParentTaskId().isSet()) { + return taskInfo; + } + } + } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); + throw new AssertionError("Couldn't find task to cancel after waiting tasks=" + tasks.getTasks()); + } + public void testReindexCancel() throws Exception { testCancel(ReindexAction.NAME, reindex().source(INDEX).destination("dest", TYPE), (response, total, modified) -> { assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request"))); @@ -164,14 +195,14 @@ public class CancelTests extends ReindexTestCase { " \"test\" : {}\n" + " } ]\n" + "}"); - assertAcked(client().admin().cluster().preparePutPipeline("set-foo", pipeline).get()); + assertAcked(client().admin().cluster().preparePutPipeline("set-processed", pipeline).get()); - testCancel(UpdateByQueryAction.NAME, updateByQuery().setPipeline("set-foo").source(INDEX), (response, total, modified) -> { + testCancel(UpdateByQueryAction.NAME, updateByQuery().setPipeline("set-processed").source(INDEX), (response, total, modified) -> { assertThat(response, matcher().updated(modified).reasonCancelled(equalTo("by user request"))); assertHitCount(client().prepareSearch(INDEX).setSize(0).setQuery(termQuery("processed", true)).get(), modified); }); - assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("set-foo")).get()); + assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("set-processed")).get()); } public void testDeleteByQueryCancel() throws Exception { @@ -181,8 +212,42 @@ public class CancelTests extends ReindexTestCase { assertThat(response, matcher().deleted(modified).reasonCancelled(equalTo("by user request"))); }); } + public void testReindexCancelWithWorkers() throws Exception { + testCancel(ReindexAction.NAME, reindex().source(INDEX).destination("dest", TYPE).setSlices(5), (response, total, modified) -> { + assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); + + refresh("dest"); + assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); + }); + } + + public void testUpdateByQueryCancelWithWorkers() throws Exception { + BytesReference pipeline = new BytesArray("{\n" + + " \"description\" : \"sets processed to true\",\n" + + " \"processors\" : [ {\n" + + " \"test\" : {}\n" + + " } ]\n" + + "}"); + assertAcked(client().admin().cluster().preparePutPipeline("set-processed", pipeline).get()); + + 
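// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: findTaskToCancel(...) above uses a
// poll-until-deadline loop rather than assertBusy because it needs to return
// the task it finds, not just assert on it. Generalized, with hypothetical
// names, the pattern looks like this:
// ---------------------------------------------------------------------------
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

class DeadlinePollSketch {
    static <T> T pollUntilPresent(Supplier<Optional<T>> attempt, long timeoutSeconds) {
        long start = System.nanoTime();
        do {
            Optional<T> found = attempt.get(); // empty() means "not there yet, retry"
            if (found.isPresent()) {
                return found.get();
            }
        } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(timeoutSeconds));
        throw new AssertionError("Timed out waiting for the condition");
    }

    public static void main(String[] args) {
        // Trivial usage: the supplier succeeds on the first attempt.
        System.out.println(pollUntilPresent(() -> Optional.of("found the task"), 10));
    }
}
// ---------------------------------------------------------------------------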
testCancel(UpdateByQueryAction.NAME, updateByQuery().setPipeline("set-processed").source(INDEX).setSlices(5), + (response, total, modified) -> { + assertThat(response, matcher().updated(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); + assertHitCount(client().prepareSearch(INDEX).setSize(0).setQuery(termQuery("processed", true)).get(), modified); + }); + + assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("set-processed")).get()); + } + + public void testDeleteByQueryCancelWithWorkers() throws Exception { + testCancel(DeleteByQueryAction.NAME, deleteByQuery().source(INDEX).setSlices(5), (response, total, modified) -> { + assertThat(response, matcher().deleted(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); + assertHitCount(client().prepareSearch(INDEX).setSize(0).get(), total - modified); + }); + } + /** - * {@link CancelAssertion} is used to check the result of the cancel test. + * Used to check the result of the cancel test. */ private interface CancelAssertion { void assertThat(BulkIndexByScrollResponse response, int total, int modified); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java index a52b46233bc..d40aed4b898 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java @@ -34,6 +34,7 @@ import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.hasSize; public class DeleteByQueryBasicTests extends ReindexTestCase { @@ -208,4 +209,27 @@ public class DeleteByQueryBasicTests extends ReindexTestCase { assertHitCount(client().prepareSearch("test").setSize(0).get(), docs); } + + public void testWorkers() throws Exception { + indexRandom(true, + client().prepareIndex("test", "test", "1").setSource("foo", "a"), + client().prepareIndex("test", "test", "2").setSource("foo", "a"), + client().prepareIndex("test", "test", "3").setSource("foo", "b"), + client().prepareIndex("test", "test", "4").setSource("foo", "c"), + client().prepareIndex("test", "test", "5").setSource("foo", "d"), + client().prepareIndex("test", "test", "6").setSource("foo", "e"), + client().prepareIndex("test", "test", "7").setSource("foo", "f") + ); + assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 7); + + // Deletes the two docs that match "foo:a" + assertThat(deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).setSlices(5).get(), + matcher().deleted(2).slices(hasSize(5))); + assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); + + // Delete remaining docs + DeleteByQueryRequestBuilder request = deleteByQuery().source("test").refresh(true).setSlices(5); + assertThat(request.get(), matcher().deleted(5).slices(hasSize(5))); + assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java index 6b615f45521..187dfb847f6 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java @@ -21,11 +21,10 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.test.ESTestCase; import static org.apache.lucene.util.TestUtil.randomSimpleString; -public class DeleteByQueryRequestTests extends ESTestCase { +public class DeleteByQueryRequestTests extends AbstractBulkByScrollRequestTestCase { public void testDeleteteByQueryRequestImplementsIndicesRequestReplaceable() { int numIndices = between(1, 100); String[] indices = new String[numIndices]; @@ -58,4 +57,19 @@ public class DeleteByQueryRequestTests extends ESTestCase { assertEquals(newIndices[i], request.getSearchRequest().indices()[i]); } } + + @Override + protected DeleteByQueryRequest newRequest() { + return new DeleteByQueryRequest(new SearchRequest(randomAsciiOfLength(5))); + } + + @Override + protected void extraRandomizationForSlice(DeleteByQueryRequest original) { + // Nothing else to randomize + } + + @Override + protected void extraForSliceAssertions(DeleteByQueryRequest original, DeleteByQueryRequest forSliced) { + // No extra assertions needed + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ParentBulkByScrollTaskTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ParentBulkByScrollTaskTests.java new file mode 100644 index 00000000000..f764cc60917 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ParentBulkByScrollTaskTests.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.elasticsearch.index.reindex.TransportRethrottleActionTests.captureResponse; +import static org.elasticsearch.index.reindex.TransportRethrottleActionTests.neverCalled; +import static org.mockito.Mockito.mock; + +public class ParentBulkByScrollTaskTests extends ESTestCase { + private int slices; + private ParentBulkByScrollTask task; + + @Before + public void createTask() { + slices = between(2, 50); + task = new ParentBulkByScrollTask(1, "test_type", "test_action", "test", null, slices); + } + + public void testBasicData() { + assertEquals(1, task.getId()); + assertEquals("test_type", task.getType()); + assertEquals("test_action", task.getAction()); + assertEquals("test", task.getDescription()); + } + + public void testProgress() { + long total = 0; + long created = 0; + long updated = 0; + long deleted = 0; + long noops = 0; + long versionConflicts = 0; + int batches = 0; + List sliceStatuses = Arrays.asList(new BulkByScrollTask.StatusOrException[slices]); + BulkByScrollTask.Status status = task.getStatus(); + assertEquals(total, status.getTotal()); + assertEquals(created, status.getCreated()); + assertEquals(updated, status.getUpdated()); + assertEquals(deleted, status.getDeleted()); + assertEquals(noops, status.getNoops()); + assertEquals(versionConflicts, status.getVersionConflicts()); + assertEquals(batches, status.getBatches()); + assertEquals(sliceStatuses, status.getSliceStatuses()); + + for (int slice = 0; slice < slices; slice++) { + int thisTotal = between(10, 10000); + int thisCreated = between(0, thisTotal); + int thisUpdated = between(0, thisTotal - thisCreated); + int thisDeleted = between(0, thisTotal - thisCreated - thisUpdated); + int thisNoops = thisTotal - thisCreated - thisUpdated - thisDeleted; + int thisVersionConflicts = between(0, 1000); + int thisBatches = between(1, 100); + BulkByScrollTask.Status sliceStatus = new BulkByScrollTask.Status(slice, thisTotal, thisUpdated, thisCreated, thisDeleted, + thisBatches, thisVersionConflicts, thisNoops, 0, 0, timeValueMillis(0), 0, null, timeValueMillis(0)); + total += thisTotal; + created += thisCreated; + updated += thisUpdated; + deleted += thisDeleted; + noops += thisNoops; + versionConflicts += thisVersionConflicts; + batches += thisBatches; + sliceStatuses.set(slice, new BulkByScrollTask.StatusOrException(sliceStatus)); + + @SuppressWarnings("unchecked") + ActionListener listener = slice < slices - 1 ? 
neverCalled() : mock(ActionListener.class); + task.onSliceResponse(listener, slice, + new BulkIndexByScrollResponse(timeValueMillis(10), sliceStatus, emptyList(), emptyList(), false)); + + status = task.getStatus(); + assertEquals(total, status.getTotal()); + assertEquals(created, status.getCreated()); + assertEquals(updated, status.getUpdated()); + assertEquals(deleted, status.getDeleted()); + assertEquals(versionConflicts, status.getVersionConflicts()); + assertEquals(batches, status.getBatches()); + assertEquals(noops, status.getNoops()); + assertEquals(sliceStatuses, status.getSliceStatuses()); + + if (slice == slices - 1) { + // The whole thing succeeded so we should have got the success + status = captureResponse(BulkIndexByScrollResponse.class, listener).getStatus(); + assertEquals(total, status.getTotal()); + assertEquals(created, status.getCreated()); + assertEquals(updated, status.getUpdated()); + assertEquals(deleted, status.getDeleted()); + assertEquals(versionConflicts, status.getVersionConflicts()); + assertEquals(batches, status.getBatches()); + assertEquals(noops, status.getNoops()); + assertEquals(sliceStatuses, status.getSliceStatuses()); + } + } + } + + +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java index 5742a37f11e..950e98844d1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java @@ -26,6 +26,9 @@ import java.util.List; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; public class ReindexBasicTests extends ReindexTestCase { public void testFiltering() throws Exception { @@ -82,4 +85,34 @@ public class ReindexBasicTests extends ReindexTestCase { assertThat(copy.get(), matcher().created(half).batches(half, 5)); assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), half); } + + public void testCopyManyWithSlices() throws Exception { + int workers = between(2, 10); + + List docs = new ArrayList<>(); + int max = between(150, 500); + for (int i = 0; i < max; i++) { + docs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("foo", "a")); + } + + indexRandom(true, docs); + assertHitCount(client().prepareSearch("source").setSize(0).get(), max); + + // Copy all the docs + ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all").refresh(true).setSlices(workers); + // Use a small batch size so we have to use more than one batch + copy.source().setSize(5); + assertThat(copy.get(), matcher().created(max).batches(greaterThanOrEqualTo(max / 5)).slices(hasSize(workers))); + assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), max); + + // Copy some of the docs + int half = max / 2; + copy = reindex().source("source").destination("dest", "half").refresh(true).setSlices(workers); + // Use a small batch size so we have to use more than one batch + copy.source().setSize(5); + copy.size(half); // The real "size" of the request. 
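// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: the essence of the sliced-copy recipe
// above in three lines, assuming ReindexTestCase's reindex() and matcher()
// helpers and Hamcrest's hasSize(). Index names and the slice count are
// placeholders.
// ---------------------------------------------------------------------------
ReindexRequestBuilder sliced = reindex().source("source").destination("dest").refresh(true).setSlices(5);
sliced.source().setSize(5); // small scroll batches force several batches per slice
assertThat(sliced.get(), matcher().slices(hasSize(5))); // the response carries one status entry per slice
// ---------------------------------------------------------------------------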
+ BulkIndexByScrollResponse response = copy.get(); + assertThat(response, matcher().created(lessThanOrEqualTo((long) half)).slices(hasSize(workers))); + assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), response.getCreated()); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java index 186bb2f0a5e..54483eae569 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class ReindexClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index b81be4a1bb2..9bfa41da7f3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE; +import static org.elasticsearch.action.DocWriteRequest.OpType.CREATE; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java index 2e41550134e..239ce43fdc5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java @@ -20,75 +20,100 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.test.ESTestCase; -import org.junit.Before; -import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.HashSet; -import java.util.Set; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; +import static java.util.Collections.singletonList; +import static org.elasticsearch.index.reindex.TransportReindexAction.buildRemoteWhitelist; import static org.elasticsearch.index.reindex.TransportReindexAction.checkRemoteWhitelist; /** * Tests the reindex-from-remote whitelist of remotes. 
*/ public class ReindexFromRemoteWhitelistTests extends ESTestCase { - private TransportAddress localhost; - - @Before - public void setupLocalhost() throws UnknownHostException { - localhost = new InetSocketTransportAddress(InetAddress.getByAddress(new byte[] { 0x7f, 0x00, 0x00, 0x01 }), 9200); - } - public void testLocalRequestWithoutWhitelist() { - checkRemoteWhitelist(emptySet(), null, localhostOrNone()); + checkRemoteWhitelist(buildRemoteWhitelist(emptyList()), null); } public void testLocalRequestWithWhitelist() { - checkRemoteWhitelist(randomWhitelist(), null, localhostOrNone()); + checkRemoteWhitelist(buildRemoteWhitelist(randomWhitelist()), null); } public void testWhitelistedRemote() { - Set whitelist = randomWhitelist(); + List whitelist = randomWhitelist(); String[] inList = whitelist.iterator().next().split(":"); String host = inList[0]; int port = Integer.valueOf(inList[1]); - checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap()), - localhostOrNone()); + checkRemoteWhitelist(buildRemoteWhitelist(whitelist), + new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap())); } - public void testMyselfInWhitelistRemote() throws UnknownHostException { - Set whitelist = randomWhitelist(); - whitelist.add("myself"); - TransportAddress publishAddress = new InetSocketTransportAddress(InetAddress.getByAddress(new byte[] {0x7f,0x00,0x00,0x01}), 9200); - checkRemoteWhitelist(whitelist, - new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null, emptyMap()), publishAddress); + public void testWhitelistedByPrefix() { + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")), + new RemoteInfo(randomAsciiOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap())); + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")), + new RemoteInfo(randomAsciiOfLength(5), "6e134134a1.us-east-1.aws.example.com", 9200, + new BytesArray("test"), null, null, emptyMap())); + } + + public void testWhitelistedBySuffix() { + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("es.example.com:*")), + new RemoteInfo(randomAsciiOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap())); + } + + public void testWhitelistedByInfix() { + checkRemoteWhitelist(buildRemoteWhitelist(singletonList("es*.example.com:9200")), + new RemoteInfo(randomAsciiOfLength(5), "es1.example.com", 9200, new BytesArray("test"), null, null, emptyMap())); + } + + + public void testLoopbackInWhitelistRemote() throws UnknownHostException { + List whitelist = randomWhitelist(); + whitelist.add("127.0.0.1:*"); + checkRemoteWhitelist(buildRemoteWhitelist(whitelist), + new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null, emptyMap())); } public void testUnwhitelistedRemote() { int port = between(1, Integer.MAX_VALUE); RemoteInfo remoteInfo = new RemoteInfo(randomAsciiOfLength(5), "not in list", port, new BytesArray("test"), null, null, emptyMap()); + List whitelist = randomBoolean() ? 
randomWhitelist() : emptyList(); Exception e = expectThrows(IllegalArgumentException.class, - () -> checkRemoteWhitelist(randomWhitelist(), remoteInfo, localhostOrNone())); + () -> checkRemoteWhitelist(buildRemoteWhitelist(whitelist), remoteInfo)); assertEquals("[not in list:" + port + "] not whitelisted in reindex.remote.whitelist", e.getMessage()); } - private Set randomWhitelist() { - int size = between(1, 100); - Set set = new HashSet<>(size); - while (set.size() < size) { - set.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE)); - } - return set; + public void testRejectMatchAll() { + assertMatchesTooMuch(singletonList("*")); + assertMatchesTooMuch(singletonList("**")); + assertMatchesTooMuch(singletonList("***")); + assertMatchesTooMuch(Arrays.asList("realstuff", "*")); + assertMatchesTooMuch(Arrays.asList("*", "realstuff")); + List random = randomWhitelist(); + random.add("*"); + assertMatchesTooMuch(random); } - private TransportAddress localhostOrNone() { - return randomFrom(random(), null, localhost); + private void assertMatchesTooMuch(List whitelist) { + Exception e = expectThrows(IllegalArgumentException.class, () -> buildRemoteWhitelist(whitelist)); + assertEquals("Refusing to start because whitelist " + whitelist + " accepts all addresses. " + + "This would allow users to reindex-from-remote any URL they like effectively having Elasticsearch make HTTP GETs " + + "for them.", e.getMessage()); + } + + private List randomWhitelist() { + int size = between(1, 100); + List whitelist = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + whitelist.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE)); + } + return whitelist; } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index 5c7a90157e7..b4ac273b43b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -72,7 +72,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { // Weird incantation required to test with netty settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); // Whitelist reindexing from the http host we're going to use - settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself"); + settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*"); settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); return settings.build(); } @@ -89,7 +89,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { } public void testReindexFromRemoteWithAuthentication() throws Exception { - RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new BytesArray("{\"match_all\":{}}"), "Aladdin", + RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), "Aladdin", "open sesame", emptyMap()); ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") .setRemoteInfo(remote); @@ -97,8 +97,8 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { } public void testReindexSendsHeaders() throws Exception { - RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new 
BytesArray("{\"match_all\":{}}"), null, null, - singletonMap(TestFilter.EXAMPLE_HEADER, "doesn't matter")); + RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, + null, singletonMap(TestFilter.EXAMPLE_HEADER, "doesn't matter")); ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") .setRemoteInfo(remote); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get()); @@ -107,8 +107,8 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { } public void testReindexWithoutAuthenticationWhenRequired() throws Exception { - RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, null, - emptyMap()); + RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, + null, emptyMap()); ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") .setRemoteInfo(remote); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get()); @@ -118,7 +118,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { } public void testReindexWithBadAuthentication() throws Exception { - RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new BytesArray("{\"match_all\":{}}"), "junk", + RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), "junk", "auth", emptyMap()); ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") .setRemoteInfo(remote); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParallelizationHelperTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParallelizationHelperTests.java new file mode 100644 index 00000000000..f1effa70ca8 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParallelizationHelperTests.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.index.reindex.ReindexParallelizationHelper.sliceIntoSubRequests; +import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; +import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchSourceBuilder; + +public class ReindexParallelizationHelperTests extends ESTestCase { + public void testSliceIntoSubRequests() throws IOException { + SearchRequest searchRequest = randomSearchRequest(() -> randomSearchSourceBuilder( + () -> null, + () -> null, + () -> null, + () -> emptyList())); + if (searchRequest.source() != null) { + // Clear the slice builder if there is one set. We can't call sliceIntoSubRequests if it is. + searchRequest.source().slice(null); + } + int times = between(2, 100); + String field = randomBoolean() ? UidFieldMapper.NAME : randomAsciiOfLength(5); + int currentSliceId = 0; + for (SearchRequest slice : sliceIntoSubRequests(searchRequest, field, times)) { + assertEquals(field, slice.source().slice().getField()); + assertEquals(currentSliceId, slice.source().slice().getId()); + assertEquals(times, slice.source().slice().getMax()); + + // If you clear the slice then the slice should be the same request as the parent request + slice.source().slice(null); + if (searchRequest.source() == null) { + // Except that adding the slice might have added an empty builder + searchRequest.source(new SearchSourceBuilder()); + } + assertEquals(searchRequest, slice); + currentSliceId++; + } + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index 0455e43ec09..559d0b54565 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -24,7 +24,8 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.index.reindex.remote.RemoteInfo; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.slice.SliceBuilder; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -32,9 +33,9 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; /** * Tests some of the validation of {@linkplain ReindexRequest}. See reindex's rest tests for much more. */ -public class ReindexRequestTests extends ESTestCase { +public class ReindexRequestTests extends AbstractBulkByScrollRequestTestCase { public void testTimestampAndTtlNotAllowed() { - ReindexRequest reindex = request(); + ReindexRequest reindex = newRequest(); reindex.getDestination().ttl("1s").timestamp("now"); ActionRequestValidationException e = reindex.validate(); assertEquals("Validation Failed: 1: setting ttl on destination isn't supported. 
use scripts instead.;" @@ -43,7 +44,7 @@ public class ReindexRequestTests extends ESTestCase { } public void testReindexFromRemoteDoesNotSupportSearchQuery() { - ReindexRequest reindex = request(); + ReindexRequest reindex = newRequest(); reindex.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), between(1, Integer.MAX_VALUE), new BytesArray("real_query"), null, null, emptyMap())); reindex.getSearchRequest().source().query(matchAllQuery()); // Unsupported place to put query @@ -52,7 +53,45 @@ public class ReindexRequestTests extends ESTestCase { e.getMessage()); } - private ReindexRequest request() { + public void testReindexFromRemoteDoesNotSupportWorkers() { + ReindexRequest reindex = newRequest(); + reindex.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), between(1, Integer.MAX_VALUE), + new BytesArray("real_query"), null, null, emptyMap())); + reindex.setSlices(between(2, Integer.MAX_VALUE)); + ActionRequestValidationException e = reindex.validate(); + assertEquals( + "Validation Failed: 1: reindex from remote sources doesn't support workers > 1 but was [" + reindex.getSlices() + "];", + e.getMessage()); + } + + public void testNoSliceWithWorkers() { + ReindexRequest reindex = newRequest(); + reindex.getSearchRequest().source().slice(new SliceBuilder(0, 4)); + reindex.setSlices(between(2, Integer.MAX_VALUE)); + ActionRequestValidationException e = reindex.validate(); + assertEquals("Validation Failed: 1: can't specify both slice and workers;", e.getMessage()); + } + + @Override + protected void extraRandomizationForSlice(ReindexRequest original) { + if (randomBoolean()) { + original.setScript(new Script(randomAsciiOfLength(5))); + } + if (randomBoolean()) { + original.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), between(1, 10000), + new BytesArray(randomAsciiOfLength(5)), null, null, emptyMap())); + } + } + + @Override + protected void extraForSliceAssertions(ReindexRequest original, ReindexRequest forSliced) { + assertEquals(original.getScript(), forSliced.getScript()); + assertEquals(original.getDestination(), forSliced.getDestination()); + assertEquals(original.getRemoteInfo(), forSliced.getRemoteInfo()); + } + + @Override + protected ReindexRequest newRequest() { ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest()); reindex.getSearchRequest().indices("source"); reindex.getDestination().index("dest"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java index 2988fcb5ca6..1ab0613103f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexVersioningTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.get.GetResponse; -import static org.elasticsearch.action.index.IndexRequest.OpType.CREATE; +import static org.elasticsearch.action.DocWriteRequest.OpType.CREATE; import static org.elasticsearch.index.VersionType.EXTERNAL; import static org.elasticsearch.index.VersionType.INTERNAL; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java index 7abd1212f6c..afc08ed0587 100644 --- 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java @@ -21,8 +21,21 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.tasks.TaskId; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; /** * Tests that you can set requests_per_second over the Java API and that you can rethrottle running requests. There are REST tests for this @@ -43,31 +56,123 @@ public class RethrottleTests extends ReindexTestCase { testCase(deleteByQuery().source("test"), DeleteByQueryAction.NAME); } - private void testCase(AbstractBulkByScrollRequestBuilder request, String actionName) - throws Exception { - // Use a single shard so the reindex has to happen in multiple batches - client().admin().indices().prepareCreate("test").setSettings("index.number_of_shards", 1).get(); - indexRandom(true, - client().prepareIndex("test", "test", "1").setSource("foo", "bar"), - client().prepareIndex("test", "test", "2").setSource("foo", "bar"), - client().prepareIndex("test", "test", "3").setSource("foo", "bar")); + public void testReindexWithWorkers() throws Exception { + testCase(reindex().source("test").destination("dest").setSlices(between(2, 10)), ReindexAction.NAME); + } + + public void testUpdateByQueryWithWorkers() throws Exception { + testCase(updateByQuery().source("test").setSlices(between(2, 10)), UpdateByQueryAction.NAME); + } + + public void testDeleteByQueryWithWorkers() throws Exception { + testCase(deleteByQuery().source("test").setSlices(between(2, 10)), DeleteByQueryAction.NAME); + } + + private void testCase(AbstractBulkByScrollRequestBuilder request, String actionName) throws Exception { + logger.info("Starting test for [{}] with [{}] slices", actionName, request.request().getSlices()); + /* Add ten documents per slice so most slices will have many documents to process, having to go to multiple batches. 
+ * We can't rely on all of them doing so, but most should. + */ + List docs = new ArrayList<>(); + for (int i = 0; i < request.request().getSlices() * 10; i++) { + docs.add(client().prepareIndex("test", "test", Integer.toString(i)).setSource("foo", "bar")); + } + indexRandom(true, docs); // Start a request that will never finish unless we rethrottle it request.setRequestsPerSecond(.000001f); // Throttle "forever" request.source().setSize(1); // Make sure we use multiple batches ListenableActionFuture responseListener = request.execute(); - // Wait for the task to start - assertBusy(() -> assertEquals(1, client().admin().cluster().prepareListTasks().setActions(actionName).get().getTasks().size())); + TaskGroup taskGroupToRethrottle = findTaskToRethrottle(actionName, request.request().getSlices()); + TaskId taskToRethrottle = taskGroupToRethrottle.getTaskInfo().getTaskId(); + + if (request.request().getSlices() == 1) { + assertThat(taskGroupToRethrottle.getChildTasks(), empty()); + } else { + // There should be a sane number of child tasks running + assertThat(taskGroupToRethrottle.getChildTasks(), + hasSize(allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(request.request().getSlices())))); + // Wait for all of the sub tasks to start (or finish, some might finish early, all that matters is that not all do) + assertBusy(() -> { + BulkByScrollTask.Status parent = (BulkByScrollTask.Status) client().admin().cluster().prepareGetTask(taskToRethrottle).get() + .getTask().getTask().getStatus(); + long finishedSubTasks = parent.getSliceStatuses().stream().filter(s -> s != null).count(); + ListTasksResponse list = client().admin().cluster().prepareListTasks().setParentTaskId(taskToRethrottle).get(); + list.rethrowFailures("subtasks"); + assertThat(finishedSubTasks + list.getTasks().size(), greaterThanOrEqualTo((long) request.request().getSlices())); + assertThat(list.getTasks().size(), greaterThan(0)); + }); + } // Now rethrottle it so it'll finish - ListTasksResponse rethrottleResponse = rethrottle().setActions(actionName).setRequestsPerSecond(Float.POSITIVE_INFINITY).get(); + float newRequestsPerSecond = randomBoolean() ? Float.POSITIVE_INFINITY : between(1, 1000) * 100000; // No throttle or "very fast" + ListTasksResponse rethrottleResponse = rethrottle().setTaskId(taskToRethrottle).setRequestsPerSecond(newRequestsPerSecond).get(); + rethrottleResponse.rethrowFailures("Rethrottle"); assertThat(rethrottleResponse.getTasks(), hasSize(1)); BulkByScrollTask.Status status = (BulkByScrollTask.Status) rethrottleResponse.getTasks().get(0).getStatus(); - assertEquals(Float.POSITIVE_INFINITY, status.getRequestsPerSecond(), Float.MIN_NORMAL); + // Now check the resulting requests per second. + if (request.request().getSlices() == 1) { + // If there is a single slice it should match perfectly + assertEquals(newRequestsPerSecond, status.getRequestsPerSecond(), Float.MIN_NORMAL); + } else { + /* Check that at least one slice was rethrottled. We won't always rethrottle all of them because they might have completed. + * With multiple slices these numbers might not add up perfectly, thus the 0.0001f. */ + float expectedSliceRequestsPerSecond = newRequestsPerSecond == Float.POSITIVE_INFINITY ? 
Float.POSITIVE_INFINITY + : newRequestsPerSecond / request.request().getSlices(); + boolean oneSliceRethrottled = false; + float totalRequestsPerSecond = 0; + for (BulkByScrollTask.StatusOrException statusOrException : status.getSliceStatuses()) { + if (statusOrException == null) { + /* The slice can be null here because it was completed but hadn't reported its success back to the task when the + * rethrottle request came through. */ + continue; + } + assertNull(statusOrException.getException()); + BulkByScrollTask.Status slice = statusOrException.getStatus(); + if (slice.getTotal() > slice.getSuccessfullyProcessed()) { + assertEquals(expectedSliceRequestsPerSecond, slice.getRequestsPerSecond(), expectedSliceRequestsPerSecond * 0.0001f); + } + if (Math.abs(expectedSliceRequestsPerSecond - slice.getRequestsPerSecond()) <= expectedSliceRequestsPerSecond * 0.0001f + || expectedSliceRequestsPerSecond == slice.getRequestsPerSecond()) { + oneSliceRethrottled = true; + } + totalRequestsPerSecond += slice.getRequestsPerSecond(); + } + assertTrue("At least one slice must be rethrottled", oneSliceRethrottled); + + /* Now assert that the parent request has the total requests per second. This is a much weaker assertion than that the parent + * actually has the newRequestsPerSecond. For the most part it will. Sometimes it'll be greater because only unfinished requests + * are rethrottled, the finished ones just keep whatever requests per second they had while they were running. But it might + * also be less than newRequestsPerSecond because the newRequestsPerSecond is divided among running sub-requests and then the + * requests are rethrottled. If one request finishes in between the division and the application of the new throttle then it + * won't be rethrottled, thus only contributing its lower total. */ + assertEquals(totalRequestsPerSecond, status.getRequestsPerSecond(), totalRequestsPerSecond * 0.0001f); + } // Now the response should come back quickly because we've rethrottled the request BulkIndexByScrollResponse response = responseListener.get(); - assertEquals("Batches didn't match, this may invalidate the test as throttling is done between batches", 3, response.getBatches()); + assertThat("Entire request completed in a single batch. This may invalidate the test as throttling is done between batches.", + response.getBatches(), greaterThanOrEqualTo(request.request().getSlices())); + } + + private TaskGroup findTaskToRethrottle(String actionName, int sliceCount) { + long start = System.nanoTime(); + do { + ListTasksResponse tasks = client().admin().cluster().prepareListTasks().setActions(actionName).setDetailed(true).get(); + tasks.rethrowFailures("Finding tasks to rethrottle"); + assertThat(tasks.getTaskGroups(), hasSize(lessThan(2))); + if (0 == tasks.getTaskGroups().size()) { + continue; + } + TaskGroup taskGroup = tasks.getTaskGroups().get(0); + if (sliceCount != 1 && taskGroup.getChildTasks().size() == 0) { + // If there are child tasks wait for at least one to start + continue; + } + return taskGroup; + } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); + throw new AssertionError("Couldn't find tasks to rethrottle. 
Here are the running tasks " + + client().admin().cluster().prepareListTasks().get()); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index 92e2598a5aa..ccd3e5a873e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -109,7 +109,7 @@ public class RetryTests extends ESSingleNodeTestCase { // Enable http so we can test retries on reindex from remote. In this case the "remote" cluster is just this cluster. settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); // Whitelist reindexing from the http host we're going to use - settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself"); + settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*"); if (useNetty3) { settings.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME); settings.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME); @@ -125,8 +125,8 @@ public class RetryTests extends ESSingleNodeTestCase { public void testReindexFromRemote() throws Exception { NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); TransportAddress address = nodeInfo.getHttp().getAddress().publishAddress(); - RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, null, - emptyMap()); + RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, + null, emptyMap()); ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") .setRemoteInfo(remote); testCase(ReindexAction.NAME, request, matcher().created(DOC_COUNT)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index 1a262a32d3d..38bc20c4891 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -20,34 +20,40 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.IntStream; 
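// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: the wire-compatibility rule the
// round-trip tests below pin down. This sketch assumes the temporary
// BulkByScrollTask.V_5_1_0_UNRELEASED constant referenced earlier in the patch
// and approximates the production writeTo() logic rather than quoting it; the
// exception message mirrors the one asserted in the tests. Method name is
// hypothetical.
// ---------------------------------------------------------------------------
void writeSliceCount(StreamOutput out, int slices) throws IOException {
    if (out.getVersion().onOrAfter(BulkByScrollTask.V_5_1_0_UNRELEASED)) {
        out.writeVInt(slices); // new enough nodes understand the slice count
    } else if (slices != 1) {
        // Older nodes can't run sliced requests; fail hard instead of silently dropping the setting.
        throw new IllegalArgumentException("Attempting to send sliced reindex-style request to a node that doesn't support it. "
                + "Version is [" + out.getVersion() + "] but must be [" + BulkByScrollTask.V_5_1_0_UNRELEASED + "]");
    }
}
// ---------------------------------------------------------------------------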
import static java.lang.Math.abs; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.hasSize; /** * Round trip tests for all Streamable things declared in this plugin. @@ -73,19 +79,18 @@ public class RoundTripTests extends ESTestCase { ReindexRequest tripped = new ReindexRequest(); roundTrip(reindex, tripped); assertRequestEquals(reindex, tripped); - assertEquals(reindex.getDestination().version(), tripped.getDestination().version()); - assertEquals(reindex.getDestination().index(), tripped.getDestination().index()); - if (reindex.getRemoteInfo() == null) { - assertNull(tripped.getRemoteInfo()); - } else { - assertNotNull(tripped.getRemoteInfo()); - assertEquals(reindex.getRemoteInfo().getScheme(), tripped.getRemoteInfo().getScheme()); - assertEquals(reindex.getRemoteInfo().getHost(), tripped.getRemoteInfo().getHost()); - assertEquals(reindex.getRemoteInfo().getQuery(), tripped.getRemoteInfo().getQuery()); - assertEquals(reindex.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); - assertEquals(reindex.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); - assertEquals(reindex.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders()); - } + + // Try slices with a version that doesn't support slices. That should fail. + reindex.setSlices(between(2, 1000)); + Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_5_0_0_rc1, reindex, null)); + assertEquals("Attempting to send sliced reindex-style request to a node that doesn't support it. " + + "Version is [5.0.0-rc1] but must be [5.1.0]", e.getMessage()); + + // Try without slices with a version that doesn't support slices. That should work. + tripped = new ReindexRequest(); + reindex.setSlices(1); + roundTrip(Version.V_5_0_0_rc1, reindex, tripped); + assertRequestEquals(reindex, tripped); } public void testUpdateByQueryRequest() throws IOException { @@ -98,9 +103,42 @@ public class RoundTripTests extends ESTestCase { roundTrip(update, tripped); assertRequestEquals(update, tripped); assertEquals(update.getPipeline(), tripped.getPipeline()); + + // Try slices with a version that doesn't support slices. That should fail. + update.setSlices(between(2, 1000)); + Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_5_0_0_rc1, update, null)); + assertEquals("Attempting to send sliced reindex-style request to a node that doesn't support it. " + + "Version is [5.0.0-rc1] but must be [5.1.0]", e.getMessage()); + + // Try without slices with a version that doesn't support slices. That should work. 
+ tripped = new UpdateByQueryRequest(); + update.setSlices(1); + roundTrip(Version.V_5_0_0_rc1, update, tripped); + assertRequestEquals(update, tripped); + assertEquals(update.getPipeline(), tripped.getPipeline()); } - private void randomRequest(AbstractBulkIndexByScrollRequest request) { + public void testDeleteByQueryRequest() throws IOException { + DeleteByQueryRequest delete = new DeleteByQueryRequest(new SearchRequest()); + randomRequest(delete); + DeleteByQueryRequest tripped = new DeleteByQueryRequest(); + roundTrip(delete, tripped); + assertRequestEquals(delete, tripped); + + // Try slices with a version that doesn't support slices. That should fail. + delete.setSlices(between(2, 1000)); + Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_5_0_0_rc1, delete, null)); + assertEquals("Attempting to send sliced reindex-style request to a node that doesn't support it. " + + "Version is [5.0.0-rc1] but must be [5.1.0]", e.getMessage()); + + // Try without slices with a version that doesn't support slices. That should work. + tripped = new DeleteByQueryRequest(); + delete.setSlices(1); + roundTrip(Version.V_5_0_0_rc1, delete, tripped); + assertRequestEquals(delete, tripped); + } + + private void randomRequest(AbstractBulkByScrollRequest request) { request.getSearchRequest().indices("test"); request.getSearchRequest().source().size(between(1, 1000)); request.setSize(random().nextBoolean() ? between(1, Integer.MAX_VALUE) : -1); @@ -108,19 +146,45 @@ public class RoundTripTests extends ESTestCase { request.setRefresh(rarely()); request.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), null, "test")); request.setWaitForActiveShards(randomIntBetween(0, 10)); - request.setScript(random().nextBoolean() ? null : randomScript()); request.setRequestsPerSecond(between(0, Integer.MAX_VALUE)); + request.setSlices(between(1, Integer.MAX_VALUE)); + } + + private void randomRequest(AbstractBulkIndexByScrollRequest request) { + randomRequest((AbstractBulkByScrollRequest) request); + request.setScript(random().nextBoolean() ? 
null : randomScript()); + } + + private void assertRequestEquals(ReindexRequest request, ReindexRequest tripped) { + assertRequestEquals((AbstractBulkIndexByScrollRequest) request, (AbstractBulkIndexByScrollRequest) tripped); + assertEquals(request.getDestination().version(), tripped.getDestination().version()); + assertEquals(request.getDestination().index(), tripped.getDestination().index()); + if (request.getRemoteInfo() == null) { + assertNull(tripped.getRemoteInfo()); + } else { + assertNotNull(tripped.getRemoteInfo()); + assertEquals(request.getRemoteInfo().getScheme(), tripped.getRemoteInfo().getScheme()); + assertEquals(request.getRemoteInfo().getHost(), tripped.getRemoteInfo().getHost()); + assertEquals(request.getRemoteInfo().getQuery(), tripped.getRemoteInfo().getQuery()); + assertEquals(request.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); + assertEquals(request.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); + assertEquals(request.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders()); + } } private void assertRequestEquals(AbstractBulkIndexByScrollRequest request, AbstractBulkIndexByScrollRequest tripped) { + assertRequestEquals((AbstractBulkByScrollRequest) request, (AbstractBulkByScrollRequest) tripped); + assertEquals(request.getScript(), tripped.getScript()); + } + + private void assertRequestEquals(AbstractBulkByScrollRequest request, AbstractBulkByScrollRequest tripped) { assertArrayEquals(request.getSearchRequest().indices(), tripped.getSearchRequest().indices()); assertEquals(request.getSearchRequest().source().size(), tripped.getSearchRequest().source().size()); assertEquals(request.isAbortOnVersionConflict(), tripped.isAbortOnVersionConflict()); assertEquals(request.isRefresh(), tripped.isRefresh()); assertEquals(request.getTimeout(), tripped.getTimeout()); assertEquals(request.getWaitForActiveShards(), tripped.getWaitForActiveShards()); - assertEquals(request.getScript(), tripped.getScript()); assertEquals(request.getRetryBackoffInitialTime(), tripped.getRetryBackoffInitialTime()); assertEquals(request.getMaxRetries(), tripped.getMaxRetries()); assertEquals(request.getRequestsPerSecond(), tripped.getRequestsPerSecond(), 0d); @@ -131,7 +195,16 @@ public class RoundTripTests extends ESTestCase { BytesStreamOutput out = new BytesStreamOutput(); status.writeTo(out); BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput()); - assertTaskStatusEquals(status, tripped); + assertTaskStatusEquals(out.getVersion(), status, tripped); + + // Also check round tripping pre-5.1 which is the first version to support parallelized scroll + out = new BytesStreamOutput(); + out.setVersion(Version.V_5_0_0_rc1); // This can be V_5_0_0 + status.writeTo(out); + StreamInput in = out.bytes().streamInput(); + in.setVersion(Version.V_5_0_0_rc1); + tripped = new BulkByScrollTask.Status(in); + assertTaskStatusEquals(Version.V_5_0_0_rc1, status, tripped); } public void testReindexResponse() throws IOException { @@ -166,10 +239,38 @@ public class RoundTripTests extends ESTestCase { } private BulkByScrollTask.Status randomStatus() { - return new BulkByScrollTask.Status(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), - randomInt(Integer.MAX_VALUE), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), - parseTimeValue(randomPositiveTimeValue(), "test"), abs(random().nextFloat()), - random().nextBoolean() ? 
null : randomSimpleString(random()), parseTimeValue(randomPositiveTimeValue(), "test"));
+        if (randomBoolean()) {
+            return randomWorkingStatus(null);
+        }
+        boolean canHaveNullStatuses = randomBoolean();
+        List<BulkByScrollTask.StatusOrException> statuses = IntStream.range(0, between(0, 10))
+            .mapToObj(i -> {
+                if (canHaveNullStatuses && rarely()) {
+                    return null;
+                }
+                if (randomBoolean()) {
+                    return new BulkByScrollTask.StatusOrException(new ElasticsearchException(randomAsciiOfLength(5)));
+                }
+                return new BulkByScrollTask.StatusOrException(randomWorkingStatus(i));
+            })
+            .collect(toList());
+        return new BulkByScrollTask.Status(statuses, randomBoolean() ? "test" : null);
+    }
+
+    private BulkByScrollTask.Status randomWorkingStatus(Integer sliceId) {
+        // These all should be believably small because we sum them if we have multiple workers
+        int total = between(0, 10000000);
+        int updated = between(0, total);
+        int created = between(0, total - updated);
+        int deleted = between(0, total - updated - created);
+        int noops = total - updated - created - deleted;
+        int batches = between(0, 10000);
+        long versionConflicts = between(0, total);
+        long bulkRetries = between(0, 10000000);
+        long searchRetries = between(0, 100000);
+        return new BulkByScrollTask.Status(sliceId, total, updated, created, deleted, batches, versionConflicts, noops, bulkRetries,
+                searchRetries, parseTimeValue(randomPositiveTimeValue(), "test"), abs(random().nextFloat()),
+                randomBoolean() ? null : randomSimpleString(random()), parseTimeValue(randomPositiveTimeValue(), "test"));
+    }
     private List<Failure> randomIndexingFailures() {
@@ -194,21 +295,30 @@
     }
     private void roundTrip(Streamable example, Streamable empty) throws IOException {
+        roundTrip(Version.CURRENT, example, empty);
+    }
+
+    private void roundTrip(Version version, Streamable example, Streamable empty) throws IOException {
         BytesStreamOutput out = new BytesStreamOutput();
+        out.setVersion(version);
         example.writeTo(out);
-        empty.readFrom(out.bytes().streamInput());
+        StreamInput in = out.bytes().streamInput();
+        in.setVersion(version);
+        empty.readFrom(in);
     }
     private Script randomScript() {
-        return new Script(randomSimpleString(random()), // Name
-                randomFrom(ScriptType.values()), // Type
-                random().nextBoolean() ? null : randomSimpleString(random()), // Language
-                emptyMap()); // Params
+        ScriptType type = randomFrom(ScriptType.values());
+        String lang = random().nextBoolean() ?
Script.DEFAULT_SCRIPT_LANG : randomSimpleString(random()); + String idOrCode = randomSimpleString(random()); + Map params = Collections.emptyMap(); + + return new Script(type, lang, idOrCode, params); } private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) { assertEquals(expected.getTook(), actual.getTook()); - assertTaskStatusEquals(expected.getStatus(), actual.getStatus()); + assertTaskStatusEquals(Version.CURRENT, expected.getStatus(), actual.getStatus()); assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size()); for (int i = 0; i < expected.getBulkFailures().size(); i++) { Failure expectedFailure = expected.getBulkFailures().get(i); @@ -232,7 +342,8 @@ public class RoundTripTests extends ESTestCase { } - private void assertTaskStatusEquals(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { + private void assertTaskStatusEquals(Version version, BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { + assertEquals(expected.getTotal(), actual.getTotal()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertEquals(expected.getCreated(), actual.getCreated()); assertEquals(expected.getDeleted(), actual.getDeleted()); @@ -245,5 +356,23 @@ public class RoundTripTests extends ESTestCase { assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f); assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled()); assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil()); + if (version.onOrAfter(BulkByScrollTask.V_5_1_0_UNRELEASED)) { + assertThat(actual.getSliceStatuses(), hasSize(expected.getSliceStatuses().size())); + for (int i = 0; i < expected.getSliceStatuses().size(); i++) { + BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i); + if (sliceStatus == null) { + assertNull(actual.getSliceStatuses().get(i)); + } else if (sliceStatus.getException() == null) { + assertNull(actual.getSliceStatuses().get(i).getException()); + assertTaskStatusEquals(version, sliceStatus.getStatus(), actual.getSliceStatuses().get(i).getStatus()); + } else { + assertNull(actual.getSliceStatuses().get(i).getStatus()); + // Just check the message because we're not testing exception serialization in general here. + assertEquals(sliceStatus.getException().getMessage(), actual.getSliceStatuses().get(i).getException().getMessage()); + } + } + } else { + assertEquals(emptyList(), actual.getSliceStatuses()); + } } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java new file mode 100644 index 00000000000..276bdb6ea4b --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java @@ -0,0 +1,210 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.reindex;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.TaskOperationFailure;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.test.ESTestCase;
+import org.hamcrest.Matcher;
+import org.junit.Before;
+import org.mockito.ArgumentCaptor;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasToString;
+import static org.hamcrest.Matchers.theInstance;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+public class TransportRethrottleActionTests extends ESTestCase {
+    private int slices;
+    private ParentBulkByScrollTask task;
+
+    @Before
+    public void createTask() {
+        slices = between(2, 50);
+        task = new ParentBulkByScrollTask(1, "test_type", "test_action", "test", null, slices);
+    }
+
+    /**
+     * Test rethrottling.
+     * @param runningSlices the number of slices still running
+     * @param simulator simulate a response from the sub-request to rethrottle the child requests
+     * @param verifier verify the resulting response
+     */
+    private void rethrottleTestCase(int runningSlices, Consumer<ActionListener<ListTasksResponse>> simulator,
+            Consumer<ActionListener<TaskInfo>> verifier) {
+        Client client = mock(Client.class);
+        String localNodeId = randomAsciiOfLength(5);
+        float newRequestsPerSecond = randomValueOtherThanMany(f -> f <= 0, () -> randomFloat());
+        @SuppressWarnings("unchecked")
+        ActionListener<TaskInfo> listener = mock(ActionListener.class);
+
+        TransportRethrottleAction.rethrottle(localNodeId, client, task, newRequestsPerSecond, listener);
+
+        // Capture the sub request and the listener so we can verify they are sane
+        ArgumentCaptor<RethrottleRequest> subRequest = ArgumentCaptor.forClass(RethrottleRequest.class);
+        @SuppressWarnings({ "unchecked", "rawtypes" }) // Magical generics incantation.....
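+        // (ArgumentCaptor.forClass only accepts a Class literal, and Java has no Class
+        // literal for a parameterized type, hence the raw cast on the next line.)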
+        ArgumentCaptor<ActionListener<ListTasksResponse>> subListener = ArgumentCaptor.forClass((Class) ActionListener.class);
+        if (runningSlices > 0) {
+            verify(client).execute(eq(RethrottleAction.INSTANCE), subRequest.capture(), subListener.capture());
+
+            assertEquals(new TaskId(localNodeId, task.getId()), subRequest.getValue().getParentTaskId());
+            assertEquals(newRequestsPerSecond / runningSlices, subRequest.getValue().getRequestsPerSecond(), 0.00001f);
+
+            simulator.accept(subListener.getValue());
+        }
+        verifier.accept(listener);
+    }
+
+    private Consumer<ActionListener<TaskInfo>> expectSuccessfulRethrottleWithStatuses(
+            List<BulkByScrollTask.StatusOrException> sliceStatuses) {
+        return listener -> {
+            TaskInfo taskInfo = captureResponse(TaskInfo.class, listener);
+            assertEquals(sliceStatuses, ((BulkByScrollTask.Status) taskInfo.getStatus()).getSliceStatuses());
+        };
+    }
+
+    public void testRethrottleSuccessfulResponse() {
+        List<TaskInfo> tasks = new ArrayList<>();
+        List<BulkByScrollTask.StatusOrException> sliceStatuses = new ArrayList<>(slices);
+        for (int i = 0; i < slices; i++) {
+            BulkByScrollTask.Status status = believeableInProgressStatus(i);
+            tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId())));
+            sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
+        }
+        rethrottleTestCase(slices,
+                listener -> listener.onResponse(new ListTasksResponse(tasks, emptyList(), emptyList())),
+                expectSuccessfulRethrottleWithStatuses(sliceStatuses));
+    }
+
+    public void testRethrottleWithSomeSucceeded() {
+        int succeeded = between(1, slices - 1);
+        List<BulkByScrollTask.StatusOrException> sliceStatuses = new ArrayList<>(slices);
+        for (int i = 0; i < succeeded; i++) {
+            BulkByScrollTask.Status status = believeableCompletedStatus(i);
+            task.onSliceResponse(neverCalled(), i,
+                    new BulkIndexByScrollResponse(timeValueMillis(10), status, emptyList(), emptyList(), false));
+            sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
+        }
+        List<TaskInfo> tasks = new ArrayList<>();
+        for (int i = succeeded; i < slices; i++) {
+            BulkByScrollTask.Status status = believeableInProgressStatus(i);
+            tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId())));
+            sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
+        }
+        rethrottleTestCase(slices - succeeded,
+                listener -> listener.onResponse(new ListTasksResponse(tasks, emptyList(), emptyList())),
+                expectSuccessfulRethrottleWithStatuses(sliceStatuses));
+    }
+
+    public void testRethrottleWithAllSucceeded() {
+        List<BulkByScrollTask.StatusOrException> sliceStatuses = new ArrayList<>(slices);
+        for (int i = 0; i < slices; i++) {
+            @SuppressWarnings("unchecked")
+            ActionListener<BulkIndexByScrollResponse> listener = i < slices - 1 ? neverCalled() : mock(ActionListener.class);
+            BulkByScrollTask.Status status = believeableCompletedStatus(i);
+            task.onSliceResponse(listener, i, new BulkIndexByScrollResponse(timeValueMillis(10), status, emptyList(), emptyList(), false));
+            if (i == slices - 1) {
+                // The whole thing succeeded so we should have got the success
+                captureResponse(BulkIndexByScrollResponse.class, listener).getStatus();
+            }
+            sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
+        }
+        rethrottleTestCase(0,
+                listener -> { /* There are no async tasks to simulate because the listener is called for us.
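+                   Every slice already completed via onSliceResponse above, so the parent task finished and invoked the listener synchronously.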
*/},
+                expectSuccessfulRethrottleWithStatuses(sliceStatuses));
+    }
+
+    private Consumer<ActionListener<TaskInfo>> expectException(Matcher<? extends Exception> exceptionMatcher) {
+        return listener -> {
+            ArgumentCaptor<Exception> failure = ArgumentCaptor.forClass(Exception.class);
+            verify(listener).onFailure(failure.capture());
+            assertThat(failure.getValue(), exceptionMatcher);
+        };
+    }
+
+    public void testRethrottleCatastrophicFailures() {
+        Exception e = new Exception();
+        rethrottleTestCase(slices, listener -> listener.onFailure(e), expectException(theInstance(e)));
+    }
+
+    public void testRethrottleTaskOperationFailure() {
+        Exception e = new Exception();
+        TaskOperationFailure failure = new TaskOperationFailure("test", 123, e);
+        rethrottleTestCase(slices,
+                listener -> listener.onResponse(new ListTasksResponse(emptyList(), singletonList(failure), emptyList())),
+                expectException(hasToString(containsString("Rethrottle of [test:123] failed"))));
+    }
+
+    public void testRethrottleNodeFailure() {
+        FailedNodeException e = new FailedNodeException("test", "test", new Exception());
+        rethrottleTestCase(slices,
+                listener -> listener.onResponse(new ListTasksResponse(emptyList(), emptyList(), singletonList(e))),
+                expectException(theInstance(e)));
+    }
+
+    private BulkByScrollTask.Status believeableInProgressStatus(Integer sliceId) {
+        return new BulkByScrollTask.Status(sliceId, 10, 0, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), 0, null, timeValueMillis(0));
+    }
+
+    private BulkByScrollTask.Status believeableCompletedStatus(Integer sliceId) {
+        return new BulkByScrollTask.Status(sliceId, 10, 10, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), 0, null, timeValueMillis(0));
+    }
+
+    static <T> ActionListener<T> neverCalled() {
+        return new ActionListener<T>() {
+            @Override
+            public void onResponse(T response) {
+                throw new RuntimeException("Expected no interactions but got [" + response + "]");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new RuntimeException("Expected no interactions but received a failure", e);
+            }
+        };
+    }
+
+    static <T> T captureResponse(Class<T> responseClass, ActionListener<T> listener) {
+        ArgumentCaptor<Exception> failure = ArgumentCaptor.forClass(Exception.class);
+        // Rethrow any failures just so we get a nice exception if there were any. We don't expect any though.
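+        // (atMost(1) tolerates the expected case where onFailure was never invoked at
+        // all, so the captor may legitimately come back empty.)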
+ verify(listener, atMost(1)).onFailure(failure.capture()); + if (false == failure.getAllValues().isEmpty()) { + throw new AssertionError(failure.getValue()); + } + ArgumentCaptor response = ArgumentCaptor.forClass(responseClass); + verify(listener).onResponse(response.capture()); + return response.getValue(); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java index 773e693a9bf..663575a2933 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.sort.SortOrder; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.hasSize; public class UpdateByQueryBasicTests extends ReindexTestCase { public void testBasics() throws Exception { @@ -61,4 +62,33 @@ public class UpdateByQueryBasicTests extends ReindexTestCase { assertEquals(3, client().prepareGet("test", "test", "3").get().getVersion()); assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); } + + public void testWorkers() throws Exception { + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"), + client().prepareIndex("test", "test", "2").setSource("foo", "a"), + client().prepareIndex("test", "test", "3").setSource("foo", "b"), + client().prepareIndex("test", "test", "4").setSource("foo", "c")); + assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 4); + assertEquals(1, client().prepareGet("test", "test", "1").get().getVersion()); + assertEquals(1, client().prepareGet("test", "test", "4").get().getVersion()); + + // Reindex all the docs + assertThat(updateByQuery().source("test").refresh(true).setSlices(5).get(), matcher().updated(4).slices(hasSize(5))); + assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion()); + assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + + // Now none of them + assertThat(updateByQuery().source("test").filter(termQuery("foo", "no_match")).setSlices(5).refresh(true).get(), + matcher().updated(0).slices(hasSize(5))); + assertEquals(2, client().prepareGet("test", "test", "1").get().getVersion()); + assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + + // Now half of them + assertThat(updateByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).setSlices(5).get(), + matcher().updated(2).slices(hasSize(5))); + assertEquals(3, client().prepareGet("test", "test", "1").get().getVersion()); + assertEquals(3, client().prepareGet("test", "test", "2").get().getVersion()); + assertEquals(2, client().prepareGet("test", "test", "3").get().getVersion()); + assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java index a4224a70c57..2d881aff91a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -21,11 
+21,11 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.script.Script; import static org.apache.lucene.util.TestUtil.randomSimpleString; -public class UpdateByQueryRequestTests extends ESTestCase { +public class UpdateByQueryRequestTests extends AbstractBulkByScrollRequestTestCase { public void testUpdateByQueryRequestImplementsIndicesRequestReplaceable() { int numIndices = between(1, 100); String[] indices = new String[numIndices]; @@ -58,4 +58,25 @@ public class UpdateByQueryRequestTests extends ESTestCase { assertEquals(newIndices[i], request.getSearchRequest().indices()[i]); } } + + @Override + protected UpdateByQueryRequest newRequest() { + return new UpdateByQueryRequest(new SearchRequest(randomAsciiOfLength(5))); + } + + @Override + protected void extraRandomizationForSlice(UpdateByQueryRequest original) { + if (randomBoolean()) { + original.setScript(new Script(randomAsciiOfLength(5))); + } + if (randomBoolean()) { + original.setPipeline(randomAsciiOfLength(5)); + } + } + + @Override + protected void extraForSliceAssertions(UpdateByQueryRequest original, UpdateByQueryRequest forSliced) { + assertEquals(original.getScript(), forSliced.getScript()); + assertEquals(original.getPipeline(), forSliced.getPipeline()); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTaskTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTaskTests.java new file mode 100644 index 00000000000..389b5a29142 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTaskTests.java @@ -0,0 +1,265 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Delayed; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class WorkingBulkByScrollTaskTests extends ESTestCase { + private WorkingBulkByScrollTask task; + + @Before + public void createTask() { + task = new WorkingBulkByScrollTask(1, "test_type", "test_action", "test", TaskId.EMPTY_TASK_ID, null, Float.POSITIVE_INFINITY); + } + + public void testBasicData() { + assertEquals(1, task.getId()); + assertEquals("test_type", task.getType()); + assertEquals("test_action", task.getAction()); + assertEquals("test", task.getDescription()); + } + + public void testProgress() { + long created = 0; + long updated = 0; + long deleted = 0; + long versionConflicts = 0; + long noops = 0; + int batch = 0; + BulkByScrollTask.Status status = task.getStatus(); + assertEquals(0, status.getTotal()); + assertEquals(created, status.getCreated()); + assertEquals(updated, status.getUpdated()); + assertEquals(deleted, status.getDeleted()); + assertEquals(versionConflicts, status.getVersionConflicts()); + assertEquals(batch, status.getBatches()); + assertEquals(noops, status.getNoops()); + + long totalHits = randomIntBetween(10, 1000); + task.setTotal(totalHits); + for (long p = 0; p < totalHits; p++) { + status = task.getStatus(); + assertEquals(totalHits, status.getTotal()); + assertEquals(created, status.getCreated()); + assertEquals(updated, status.getUpdated()); + assertEquals(deleted, status.getDeleted()); + assertEquals(versionConflicts, status.getVersionConflicts()); + assertEquals(batch, status.getBatches()); + assertEquals(noops, status.getNoops()); + + if (randomBoolean()) { + created++; + task.countCreated(); + } else if (randomBoolean()) { + updated++; + task.countUpdated(); + } else { + deleted++; + task.countDeleted(); + } + + if (rarely()) { + versionConflicts++; + task.countVersionConflict(); + } + + if (rarely()) { + batch++; + task.countBatch(); + } + + if (rarely()) { + noops++; + task.countNoop(); + } + } + status = task.getStatus(); + assertEquals(totalHits, status.getTotal()); + assertEquals(created, status.getCreated()); + assertEquals(updated, status.getUpdated()); + assertEquals(deleted, status.getDeleted()); + assertEquals(versionConflicts, status.getVersionConflicts()); + assertEquals(batch, status.getBatches()); + assertEquals(noops, status.getNoops()); + } + + /** + * Furiously rethrottles a delayed request to make sure that we never run it twice. 
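+     * Rethrottling while a request is waiting reschedules the remaining delay, so this test
+     * hammers rethrottle from several threads and asserts the delayed task still runs exactly once.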
+ */ + public void testDelayAndRethrottle() throws IOException, InterruptedException { + List errors = new CopyOnWriteArrayList<>(); + AtomicBoolean done = new AtomicBoolean(); + int threads = between(1, 10); + CyclicBarrier waitForShutdown = new CyclicBarrier(threads); + + /* + * We never end up waiting this long because the test rethrottles over and over again, ratcheting down the delay a random amount + * each time. + */ + float originalRequestsPerSecond = (float) randomDoubleBetween(1, 10000, true); + task.rethrottle(originalRequestsPerSecond); + TimeValue maxDelay = timeValueSeconds(between(1, 5)); + assertThat(maxDelay.nanos(), greaterThanOrEqualTo(0L)); + int batchSizeForMaxDelay = (int) (maxDelay.seconds() * originalRequestsPerSecond); + ThreadPool threadPool = new TestThreadPool(getTestName()) { + @Override + public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { + assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos()))); + return super.schedule(delay, name, command); + } + }; + try { + task.delayPrepareBulkRequest(threadPool, timeValueNanos(System.nanoTime()), batchSizeForMaxDelay, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + boolean oldValue = done.getAndSet(true); + if (oldValue) { + throw new RuntimeException("Ran twice oh no!"); + } + } + + @Override + public void onFailure(Exception e) { + errors.add(e); + } + }); + + // Rethrottle on a random number of threads, on of which is this thread. + Runnable test = () -> { + try { + int rethrottles = 0; + while (false == done.get()) { + float requestsPerSecond = (float) randomDoubleBetween(0, originalRequestsPerSecond * 2, true); + task.rethrottle(requestsPerSecond); + rethrottles += 1; + } + logger.info("Rethrottled [{}] times", rethrottles); + waitForShutdown.await(); + } catch (Exception e) { + errors.add(e); + } + }; + for (int i = 1; i < threads; i++) { + threadPool.generic().execute(test); + } + test.run(); + } finally { + // Other threads should finish up quickly as they are checking the same AtomicBoolean. 
+ threadPool.shutdown(); + threadPool.awaitTermination(10, TimeUnit.SECONDS); + } + assertThat(errors, empty()); + } + + public void testDelayNeverNegative() throws IOException { + // Thread pool that returns a ScheduledFuture that claims to have a negative delay + ThreadPool threadPool = new TestThreadPool("test") { + public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { + return new ScheduledFuture() { + @Override + public long getDelay(TimeUnit unit) { + return -1; + } + + @Override + public int compareTo(Delayed o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCancelled() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isDone() { + throw new UnsupportedOperationException(); + } + + @Override + public Void get() throws InterruptedException, ExecutionException { + throw new UnsupportedOperationException(); + } + + @Override + public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + throw new UnsupportedOperationException(); + } + }; + } + }; + try { + // Have the task use the thread pool to delay a task that does nothing + task.delayPrepareBulkRequest(threadPool, timeValueSeconds(0), 1, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + } + @Override + public void onFailure(Exception e) { + throw new UnsupportedOperationException(); + } + }); + // Even though the future returns a negative delay we just return 0 because the time is up. + assertEquals(timeValueSeconds(0), task.getStatus().getThrottledUntil()); + } finally { + threadPool.shutdown(); + } + } + + public void testPerfectlyThrottledBatchTime() { + task.rethrottle(Float.POSITIVE_INFINITY); + assertThat((double) task.perfectlyThrottledBatchTime(randomInt()), closeTo(0f, 0f)); + + int total = between(0, 1000000); + task.rethrottle(1); + assertThat((double) task.perfectlyThrottledBatchTime(total), + closeTo(TimeUnit.SECONDS.toNanos(total), TimeUnit.SECONDS.toNanos(1))); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 9bbfd175a79..9f2eaad0a67 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -96,7 +96,7 @@ public class RemoteRequestBuildersTests extends ESTestCase { // Test sort:_doc for versions that support it. Version remoteVersion = Version.fromId(between(Version.V_2_1_0_ID, Version.CURRENT.id)); searchRequest.source().sort("_doc"); - assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sorts", "_doc:asc")); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sort", "_doc:asc")); // Test search_type scan for versions that don't support sort:_doc. 
remoteVersion = Version.fromId(between(0, Version.V_2_1_0_ID - 1)); @@ -106,14 +106,14 @@ public class RemoteRequestBuildersTests extends ESTestCase { remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); searchRequest.source().sorts().clear(); searchRequest.source().sort("foo"); - assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sorts", "foo:asc")); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sort", "foo:asc")); } public void testInitialSearchParamsFields() { SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); // Test request without any fields - Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); + Version remoteVersion = Version.fromId(between(Version.V_2_0_0_beta1_ID, Version.CURRENT.id)); assertThat(initialSearchParams(searchRequest, remoteVersion), not(either(hasKey("stored_fields")).or(hasKey("fields")))); @@ -125,8 +125,12 @@ public class RemoteRequestBuildersTests extends ESTestCase { assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("stored_fields", "_source,_id")); // Test fields for versions that support it - remoteVersion = Version.fromId(between(0, Version.V_5_0_0_alpha4_ID - 1)); + remoteVersion = Version.fromId(between(Version.V_2_0_0_beta1_ID, Version.V_5_0_0_alpha4_ID - 1)); assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("fields", "_source,_id")); + + // Test extra fields for versions that need it + remoteVersion = Version.fromId(between(0, Version.V_2_0_0_beta1_ID - 1)); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("fields", "_source,_id,_parent,_routing,_ttl")); } public void testInitialSearchParamsMisc() { @@ -151,6 +155,7 @@ public class RemoteRequestBuildersTests extends ESTestCase { assertThat(params, scroll == null ? not(hasKey("scroll")) : hasEntry("scroll", scroll.toString())); assertThat(params, hasEntry("size", Integer.toString(size))); assertThat(params, fetchVersion == null || fetchVersion == true ? 
hasEntry("version", null) : not(hasEntry("version", null))); + assertThat(params, hasEntry("_source", "true")); } public void testInitialSearchEntity() throws IOException { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 351eb49f906..5c2278f59eb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex.remote; +import org.apache.http.ContentTooLongException; import org.apache.http.HttpEntity; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; @@ -39,10 +40,13 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.reindex.ScrollableHitSource.Response; @@ -76,7 +80,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RemoteScrollableHitSourceTests extends ESTestCase { - private final String FAKE_SCROLL_ID = "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll"; + private static final String FAKE_SCROLL_ID = "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll"; private int retries; private ThreadPool threadPool; private SearchRequest searchRequest; @@ -192,7 +196,7 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { } /** - * Test for parsing _ttl, _timestamp, and _routing. + * Test for parsing _ttl, _timestamp, _routing, and _parent. */ public void testParseScrollFullyLoaded() throws Exception { AtomicBoolean called = new AtomicBoolean(); @@ -208,6 +212,24 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { assertTrue(called.get()); } + /** + * Test for parsing _ttl, _routing, and _parent. _timestamp isn't available. + */ + public void testParseScrollFullyLoadedFrom1_7() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + sourceWithMockedRemoteCall("scroll_fully_loaded_1_7.json").doStartNextScroll("", timeValueMillis(0), r -> { + assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); + assertEquals((Long) 1234L, r.getHits().get(0).getTTL()); + assertNull(r.getHits().get(0).getTimestamp()); // Not available from 1.7 + assertEquals("testrouting", r.getHits().get(0).getRouting()); + assertEquals("testparent", r.getHits().get(0).getParent()); + called.set(true); + }); + assertTrue(called.get()); + } + + /** * Versions of Elasticsearch before 2.1.0 don't support sort:_doc and instead need to use search_type=scan. Scan doesn't return * documents the first iteration but reindex doesn't like that. So we jump start strait to the next iteration. 
@@ -411,6 +433,38 @@ public class RemoteScrollableHitSourceTests extends ESTestCase {
         assertEquals(badEntityException, wrapped.getSuppressed()[0]);
     }

+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    public void testTooLargeResponse() throws Exception {
+        ContentTooLongException tooLong = new ContentTooLongException("too long!");
+        CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
+        when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
+                any(FutureCallback.class))).then(new Answer<Future<HttpResponse>>() {
+            @Override
+            public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
+                HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1];
+                FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[2];
+                assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit());
+                callback.failed(tooLong);
+                return null;
+            }
+        });
+        RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient);
+
+        AtomicBoolean called = new AtomicBoolean();
+        Consumer<Response> checkResponse = r -> called.set(true);
+        Throwable e = expectThrows(RuntimeException.class,
+                () -> source.doStartNextScroll(FAKE_SCROLL_ID, timeValueMillis(0), checkResponse));
+        // Unwrap some artifacts from the test
+        while (e.getMessage().equals("failed")) {
+            e = e.getCause();
+        }
+        // This next exception is what the user sees
+        assertEquals("Remote responded with a chunk that was too large. Use a smaller batch size.", e.getMessage());
+        // And that exception is reported as being caused by the underlying exception returned by the client
+        assertSame(tooLong, e.getCause());
+        assertFalse(called.get());
+    }
+
     private RemoteScrollableHitSource sourceWithMockedRemoteCall(String...
paths) throws Exception { return sourceWithMockedRemoteCall(true, paths); } @@ -464,7 +518,11 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { return null; } }); + return sourceWithMockedClient(mockRemoteVersion, httpClient); + } + private RemoteScrollableHitSource sourceWithMockedClient(boolean mockRemoteVersion, CloseableHttpAsyncClient httpClient) + throws Exception { HttpAsyncClientBuilder clientBuilder = mock(HttpAsyncClientBuilder.class); when(clientBuilder.build()).thenReturn(httpClient); diff --git a/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json b/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json new file mode 100644 index 00000000000..f8bebddecf3 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json @@ -0,0 +1,31 @@ +{ + "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll", + "took" : 3, + "timed_out" : false, + "terminated_early" : true, + "_shards" : { + "total" : 5, + "successful" : 5, + "failed" : 0 + }, + "hits" : { + "total" : 4, + "max_score" : null, + "hits" : [ { + "_index" : "test", + "_type" : "test", + "_id" : "AVToMiDL50DjIiBO3yKA", + "_version" : 1, + "_score" : null, + "_source" : { + "test" : "test3" + }, + "sort" : [ 0 ], + "fields" : { + "_routing" : "testrouting", + "_ttl" : 1234, + "_parent" : "testparent" + } + } ] + } +} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml index 8833c844b22..4aa63facc24 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml @@ -109,3 +109,24 @@ body: query: match_all: {} + +--- +"junk in slices fails": + - do: + catch: /Failed to parse int parameter \[slices\] with value \[junk\]/ + delete_by_query: + slices: junk + index: test + body: + query: + match_all: {} +--- +"zero slices fails": + - do: + catch: /\[slices\] must be at least 1/ + delete_by_query: + slices: 0 + index: test + body: + query: + match_all: {} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/80_slices.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/80_slices.yaml new file mode 100644 index 00000000000..fe0c816ee14 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/80_slices.yaml @@ -0,0 +1,279 @@ +--- +"Multiple slices": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 4 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: test + slices: 5 + body: + query: + match_all: {} + + - is_false: timed_out + - match: {deleted: 4} + - is_false: created + - is_false: updated + - match: {version_conflicts: 0} + - match: {failures: []} + - match: {noops: 0} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + - match: {slices.0.version_conflicts: 0} + - match: {slices.0.throttled_millis: 0} + - match: {slices.1.version_conflicts: 0} + - match: {slices.1.throttled_millis: 0} + - match: {slices.2.version_conflicts: 0} + - match: {slices.2.throttled_millis: 0} + - match: 
{slices.3.version_conflicts: 0} + - match: {slices.3.throttled_millis: 0} + - match: {slices.4.version_conflicts: 0} + - match: {slices.4.throttled_millis: 0} + + - do: + indices.refresh: {} + - do: + count: + index: test + - match: {count: 0} + +--- +"Multiple slices with wait_for_completion=false": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 4 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + index: test + wait_for_completion: false + slices: 5 + body: + query: + match_all: {} + - is_false: timed_out + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: updated + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: deleted + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + # The task will be in the response even if it finished before we got here + # because of task persistence. + - is_true: task + - match: {response.total: 4} + - match: {response.deleted: 4} + - match: {response.version_conflicts: 0} + - match: {response.failures: []} + - match: {response.throttled_millis: 0} + - gte: { response.took: 0 } + - is_false: response.task + - match: {response.slices.0.version_conflicts: 0} + - match: {response.slices.0.throttled_millis: 0} + - match: {response.slices.1.version_conflicts: 0} + - match: {response.slices.1.throttled_millis: 0} + - match: {response.slices.2.version_conflicts: 0} + - match: {response.slices.2.throttled_millis: 0} + - match: {response.slices.3.version_conflicts: 0} + - match: {response.slices.3.throttled_millis: 0} + - match: {response.slices.4.version_conflicts: 0} + - match: {response.slices.4.throttled_millis: 0} + - match: {task.status.total: 4} + - match: {task.status.deleted: 4} + - match: {task.status.version_conflicts: 0} + - match: {task.status.throttled_millis: 0} + - match: {task.status.slices.0.version_conflicts: 0} + - match: {task.status.slices.0.throttled_millis: 0} + - match: {task.status.slices.1.version_conflicts: 0} + - match: {task.status.slices.1.throttled_millis: 0} + - match: {task.status.slices.2.version_conflicts: 0} + - match: {task.status.slices.2.throttled_millis: 0} + - match: {task.status.slices.3.version_conflicts: 0} + - match: {task.status.slices.3.throttled_millis: 0} + - match: {task.status.slices.4.version_conflicts: 0} + - match: {task.status.slices.4.throttled_millis: 0} + + + # Only the "parent" reindex task wrote its status to the tasks index though + - do: + indices.refresh: {} + - do: + search: + index: .tasks + - match: { hits.total: 1 } + + - do: + count: + index: test + - match: {count: 0} + +--- +"Multiple slices with rethrottle": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 4 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 5 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 6 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Start the task with a requests_per_second 
that should make it take a very long time + - do: + delete_by_query: + index: test + wait_for_completion: false + slices: 2 + requests_per_second: 0.0001 + body: + query: + match_all: {} + - is_false: timed_out + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: updated + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: deleted + + # Allow the task to complete + - do: + reindex_rethrottle: + requests_per_second: -1 + task_id: $task + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + # The task will be in the response even if it finished before we got here + # because of task persistence. + - is_true: task + - match: {response.total: 6} + - match: {response.deleted: 6} + - match: {response.version_conflicts: 0} + - match: {response.failures: []} + - match: {response.throttled_millis: 0} + - gte: { response.took: 0 } + - is_false: response.task + - match: {response.slices.0.version_conflicts: 0} + - match: {response.slices.0.throttled_millis: 0} + - match: {response.slices.1.version_conflicts: 0} + - match: {response.slices.1.throttled_millis: 0} + - match: {task.status.total: 6} + - match: {task.status.deleted: 6} + - match: {task.status.version_conflicts: 0} + - match: {task.status.throttled_millis: 0} + - match: {task.status.slices.0.version_conflicts: 0} + - match: {task.status.slices.0.throttled_millis: 0} + - match: {task.status.slices.1.version_conflicts: 0} + - match: {task.status.slices.1.throttled_millis: 0} + + # Only the "parent" reindex task wrote its status to the tasks index though + - do: + indices.refresh: {} + - do: + search: + index: .tasks + - match: { hits.total: 1 } + + - do: + count: + index: test + - match: {count: 0} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml index cab92310dbd..ffcdb42c86c 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml @@ -279,3 +279,27 @@ index: test dest: index: dest + +--- +"junk in slices fails": + - do: + catch: /Failed to parse int parameter \[slices\] with value \[junk\]/ + reindex: + slices: junk + body: + source: + index: test + dest: + index: dest + +--- +"zero slices fails": + - do: + catch: /\[slices\] must be at least 1/ + reindex: + slices: 0 + body: + source: + index: test + dest: + index: dest diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_slices.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_slices.yaml new file mode 100644 index 00000000000..5c54adb5c08 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_slices.yaml @@ -0,0 +1,287 @@ +--- +"Multiple slices": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 4 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + reindex: + slices: 5 + body: + source: + index: source + dest: + index: dest + - match: {created: 4} + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {failures: []} 
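+  # Each slice reports its own stats below; the top-level counts are the sums across slices.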
+ - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + - is_false: deleted + - match: {slices.0.updated: 0} + - match: {slices.0.version_conflicts: 0} + - match: {slices.0.throttled_millis: 0} + - match: {slices.1.updated: 0} + - match: {slices.1.version_conflicts: 0} + - match: {slices.1.throttled_millis: 0} + - match: {slices.2.updated: 0} + - match: {slices.2.version_conflicts: 0} + - match: {slices.2.throttled_millis: 0} + - match: {slices.3.updated: 0} + - match: {slices.3.version_conflicts: 0} + - match: {slices.3.throttled_millis: 0} + - match: {slices.4.updated: 0} + - match: {slices.4.version_conflicts: 0} + - match: {slices.4.throttled_millis: 0} + +--- +"Multiple slices with wait_for_completion=false": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 4 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + reindex: + slices: 5 + wait_for_completion: false + body: + source: + index: source + dest: + index: dest + - is_false: timed_out + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: updated + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: deleted + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + # The task will be in the response even if it finished before we got here + # because of task persistence. + - is_true: task + - match: {response.total: 4} + - match: {response.created: 4} + - match: {response.updated: 0} + - match: {response.version_conflicts: 0} + - match: {response.failures: []} + - match: {response.throttled_millis: 0} + - gte: { response.took: 0 } + - is_false: response.task + - is_false: response.deleted + - match: {response.slices.0.updated: 0} + - match: {response.slices.0.version_conflicts: 0} + - match: {response.slices.0.throttled_millis: 0} + - match: {response.slices.1.updated: 0} + - match: {response.slices.1.version_conflicts: 0} + - match: {response.slices.1.throttled_millis: 0} + - match: {response.slices.2.updated: 0} + - match: {response.slices.2.version_conflicts: 0} + - match: {response.slices.2.throttled_millis: 0} + - match: {response.slices.3.updated: 0} + - match: {response.slices.3.version_conflicts: 0} + - match: {response.slices.3.throttled_millis: 0} + - match: {response.slices.4.updated: 0} + - match: {response.slices.4.version_conflicts: 0} + - match: {response.slices.4.throttled_millis: 0} + - match: {task.status.total: 4} + - match: {task.status.created: 4} + - match: {task.status.updated: 0} + - match: {task.status.version_conflicts: 0} + - match: {task.status.throttled_millis: 0} + - match: {task.status.slices.0.updated: 0} + - match: {task.status.slices.0.version_conflicts: 0} + - match: {task.status.slices.0.throttled_millis: 0} + - match: {task.status.slices.1.updated: 0} + - match: {task.status.slices.1.version_conflicts: 0} + - match: {task.status.slices.1.throttled_millis: 0} + - match: {task.status.slices.2.updated: 0} + - match: {task.status.slices.2.version_conflicts: 0} + - match: {task.status.slices.2.throttled_millis: 0} + - match: {task.status.slices.3.updated: 0} + - match: {task.status.slices.3.version_conflicts: 0} + - match: 
{task.status.slices.3.throttled_millis: 0} + - match: {task.status.slices.4.updated: 0} + - match: {task.status.slices.4.version_conflicts: 0} + - match: {task.status.slices.4.throttled_millis: 0} + + # Only the "parent" reindex task wrote its status to the tasks index though + - do: + indices.refresh: {} + - do: + search: + index: .tasks + - match: { hits.total: 1 } + + +--- +"Multiple slices with rethrottle": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 4 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 5 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 6 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Start the task with a requests_per_second that should make it take a very long time + - do: + reindex: + slices: 2 + wait_for_completion: false + requests_per_second: 0.0001 + body: + source: + index: source + dest: + index: dest + - is_false: timed_out + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: updated + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: deleted + + # Allow the task to complete + - do: + reindex_rethrottle: + requests_per_second: -1 + task_id: $task + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + # The task will be in the response even if it finished before we got here + # because of task persistence. + - is_true: task + - match: {response.total: 6} + - match: {response.created: 6} + - match: {response.updated: 0} + - match: {response.version_conflicts: 0} + - match: {response.failures: []} + - match: {response.throttled_millis: 0} + - gte: { response.took: 0 } + - is_false: response.task + - is_false: response.deleted + - match: {response.slices.0.updated: 0} + - match: {response.slices.0.version_conflicts: 0} + - match: {response.slices.0.throttled_millis: 0} + - match: {response.slices.1.updated: 0} + - match: {response.slices.1.version_conflicts: 0} + - match: {response.slices.1.throttled_millis: 0} + - match: {task.status.total: 6} + - match: {task.status.created: 6} + - match: {task.status.updated: 0} + - match: {task.status.version_conflicts: 0} + - match: {task.status.throttled_millis: 0} + - match: {task.status.slices.0.updated: 0} + - match: {task.status.slices.0.version_conflicts: 0} + - match: {task.status.slices.0.throttled_millis: 0} + - match: {task.status.slices.1.updated: 0} + - match: {task.status.slices.1.version_conflicts: 0} + - match: {task.status.slices.1.throttled_millis: 0} + + # Only the "parent" reindex task wrote its status to the tasks index though + - do: + indices.refresh: {} + - do: + search: + index: .tasks + - match: { hits.total: 1 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml index 54eb262e9ba..be83c0cb9f2 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml @@ -104,3 +104,19 @@ update_by_query: requests_per_second: 0 index: test + +--- 
+"junk in slices fails": + - do: + catch: /Failed to parse int parameter \[slices\] with value \[junk\]/ + update_by_query: + slices: junk + index: test + +--- +"zero slices fails": + - do: + catch: /\[slices\] must be at least 1/ + update_by_query: + slices: 0 + index: test diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/70_slices.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/70_slices.yaml new file mode 100644 index 00000000000..f390d43f9f3 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/70_slices.yaml @@ -0,0 +1,261 @@ +--- +"Multiple slices": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 4 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + update_by_query: + index: test + slices: 5 + body: + query: + match_all: {} + + - is_false: timed_out + - match: {updated: 4} + - is_false: created + - match: {version_conflicts: 0} + - match: {failures: []} + - match: {noops: 0} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + - match: {slices.0.version_conflicts: 0} + - match: {slices.0.throttled_millis: 0} + - match: {slices.1.version_conflicts: 0} + - match: {slices.1.throttled_millis: 0} + - match: {slices.2.version_conflicts: 0} + - match: {slices.2.throttled_millis: 0} + - match: {slices.3.version_conflicts: 0} + - match: {slices.3.throttled_millis: 0} + - match: {slices.4.version_conflicts: 0} + - match: {slices.4.throttled_millis: 0} + +--- +"Multiple slices with wait_for_completion=false": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 4 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + update_by_query: + index: test + wait_for_completion: false + slices: 5 + body: + query: + match_all: {} + - is_false: timed_out + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: updated + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: deleted + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + # The task will be in the response even if it finished before we got here + # because of task persistence. 
+ - is_true: task + - match: {response.total: 4} + - match: {response.updated: 4} + - match: {response.version_conflicts: 0} + - match: {response.failures: []} + - match: {response.throttled_millis: 0} + - gte: { response.took: 0 } + - is_false: response.task + - match: {response.slices.0.version_conflicts: 0} + - match: {response.slices.0.throttled_millis: 0} + - match: {response.slices.1.version_conflicts: 0} + - match: {response.slices.1.throttled_millis: 0} + - match: {response.slices.2.version_conflicts: 0} + - match: {response.slices.2.throttled_millis: 0} + - match: {response.slices.3.version_conflicts: 0} + - match: {response.slices.3.throttled_millis: 0} + - match: {response.slices.4.version_conflicts: 0} + - match: {response.slices.4.throttled_millis: 0} + - match: {task.status.total: 4} + - match: {task.status.updated: 4} + - match: {task.status.version_conflicts: 0} + - match: {task.status.throttled_millis: 0} + - match: {task.status.slices.0.version_conflicts: 0} + - match: {task.status.slices.0.throttled_millis: 0} + - match: {task.status.slices.1.version_conflicts: 0} + - match: {task.status.slices.1.throttled_millis: 0} + - match: {task.status.slices.2.version_conflicts: 0} + - match: {task.status.slices.2.throttled_millis: 0} + - match: {task.status.slices.3.version_conflicts: 0} + - match: {task.status.slices.3.throttled_millis: 0} + - match: {task.status.slices.4.version_conflicts: 0} + - match: {task.status.slices.4.throttled_millis: 0} + + + # Only the "parent" reindex task wrote its status to the tasks index though + - do: + indices.refresh: {} + - do: + search: + index: .tasks + - match: { hits.total: 1 } + +--- +"Multiple slices with rethrottle": + - do: + index: + index: test + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 2 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 3 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 4 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 5 + body: { "text": "test" } + - do: + index: + index: test + type: foo + id: 6 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Start the task with a requests_per_second that should make it take a very long time + - do: + update_by_query: + index: test + wait_for_completion: false + requests_per_second: 0.0001 + slices: 2 + body: + query: + match_all: {} + - is_false: timed_out + - match: {task: '/.+:\d+/'} + - set: {task: task} + - is_false: updated + - is_false: version_conflicts + - is_false: batches + - is_false: failures + - is_false: noops + - is_false: took + - is_false: throttled_millis + - is_false: created + - is_false: deleted + + # Allow the task to complete + - do: + reindex_rethrottle: + requests_per_second: -1 + task_id: $task + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + # The task will be in the response even if it finished before we got here + # because of task persistence. 
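+      # Once the throttle is lifted the task should have updated all six documents.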
+ - is_true: task + - match: {response.total: 6} + - match: {response.updated: 6} + - match: {response.version_conflicts: 0} + - match: {response.failures: []} + - match: {response.throttled_millis: 0} + - gte: { response.took: 0 } + - is_false: response.task + - match: {response.slices.0.version_conflicts: 0} + - match: {response.slices.0.throttled_millis: 0} + - match: {response.slices.1.version_conflicts: 0} + - match: {response.slices.1.throttled_millis: 0} + - match: {task.status.total: 6} + - match: {task.status.updated: 6} + - match: {task.status.version_conflicts: 0} + - match: {task.status.throttled_millis: 0} + - match: {task.status.slices.0.version_conflicts: 0} + - match: {task.status.slices.0.throttled_millis: 0} + - match: {task.status.slices.1.version_conflicts: 0} + - match: {task.status.slices.1.throttled_millis: 0} + + # Only the "parent" reindex task wrote its status to the tasks index though + - do: + indices.refresh: {} + - do: + search: + index: .tasks + - match: { hits.total: 1 } diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpChannel.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpChannel.java index a715abfd877..d79fae21b8c 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpChannel.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpChannel.java @@ -33,6 +33,7 @@ import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelFuture; import org.jboss.netty.channel.ChannelFutureListener; @@ -41,6 +42,7 @@ import org.jboss.netty.handler.codec.http.CookieDecoder; import org.jboss.netty.handler.codec.http.CookieEncoder; import org.jboss.netty.handler.codec.http.DefaultHttpResponse; import org.jboss.netty.handler.codec.http.HttpHeaders; +import org.jboss.netty.handler.codec.http.HttpMethod; import org.jboss.netty.handler.codec.http.HttpResponse; import org.jboss.netty.handler.codec.http.HttpResponseStatus; import org.jboss.netty.handler.codec.http.HttpVersion; @@ -109,7 +111,11 @@ public final class Netty3HttpChannel extends AbstractRestChannel { boolean addedReleaseListener = false; try { buffer = Netty3Utils.toChannelBuffer(content); - resp.setContent(buffer); + if (HttpMethod.HEAD.equals(nettyRequest.getMethod())) { + resp.setContent(ChannelBuffers.EMPTY_BUFFER); + } else { + resp.setContent(buffer); + } // If our response doesn't specify a content-type header, set one setHeaderField(resp, HttpHeaders.Names.CONTENT_TYPE, response.contentType(), false); diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpRequestHandler.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpRequestHandler.java index 829a2d9aa06..b47b9ebd523 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpRequestHandler.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpRequestHandler.java @@ -55,7 +55,7 @@ public class Netty3HttpRequestHandler extends SimpleChannelUpstreamHandler { } // the netty HTTP handling always copy over the buffer to its own buffer, either in NioWorker internally - // when reading, or using a cumalation buffer + // when reading, or 
using a cumulation buffer Netty3HttpRequest httpRequest = new Netty3HttpRequest(request, e.getChannel()); Netty3HttpChannel channel = new Netty3HttpChannel(serverTransport, httpRequest, oue, detailedErrorsEnabled, threadContext); serverTransport.dispatchRequest(httpRequest, channel); diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java index c60f47ee3da..c19cbbb7c57 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/http/netty3/Netty3HttpServerTransport.java @@ -31,10 +31,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.NetworkExceptionHelper; import org.elasticsearch.common.transport.PortsRange; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -126,7 +125,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_TCP_NO_DELAY = @@ -282,34 +281,42 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem @Override protected void doStart() { - this.serverOpenChannels = new Netty3OpenChannelsHandler(logger); - if (blockingServer) { - serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)) - )); - } else { - serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), - Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)), - workerCount)); - } - serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory()); + boolean success = false; + try { + this.serverOpenChannels = new Netty3OpenChannelsHandler(logger); + if (blockingServer) { + serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)) + )); + } else { + serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory( + Executors.newCachedThreadPool(daemonThreadFactory(settings, 
HTTP_SERVER_BOSS_THREAD_NAME_PREFIX)), + Executors.newCachedThreadPool(daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)), + workerCount)); + } + serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory()); - serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay); - serverBootstrap.setOption("child.keepAlive", tcpKeepAlive); - if (tcpSendBufferSize.getBytes() > 0) { + serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay); + serverBootstrap.setOption("child.keepAlive", tcpKeepAlive); + if (tcpSendBufferSize.getBytes() > 0) { - serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes()); + serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.getBytes()); + } + if (tcpReceiveBufferSize.getBytes() > 0) { + serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes()); + } + serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); + serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); + serverBootstrap.setOption("reuseAddress", reuseAddress); + serverBootstrap.setOption("child.reuseAddress", reuseAddress); + this.boundAddress = createBoundHttpAddress(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise we leak threads since we never moved to started + } } - if (tcpReceiveBufferSize.getBytes() > 0) { - serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.getBytes()); - } - serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); - serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory); - serverBootstrap.setOption("reuseAddress", reuseAddress); - serverBootstrap.setOption("child.reuseAddress", reuseAddress); - this.boundAddress = createBoundHttpAddress(); } private BoundTransportAddress createBoundHttpAddress() { @@ -321,7 +328,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem throw new BindHttpException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); } - List<InetSocketTransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length); + List<TransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length); for (InetAddress address : hostAddresses) { boundAddresses.add(bindAddress(address)); } @@ -335,15 +342,15 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort); - return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), new InetSocketTransportAddress(publishAddress)); + return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), new TransportAddress(publishAddress)); } // package private for tests - static int resolvePublishPort(Settings settings, List<InetSocketTransportAddress> boundAddresses, InetAddress publishInetAddress) { + static int resolvePublishPort(Settings settings, List<TransportAddress> boundAddresses, InetAddress publishInetAddress) { int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); if (publishPort < 0) { - for (InetSocketTransportAddress boundAddress : boundAddresses) { + for (TransportAddress boundAddress : boundAddresses) { InetAddress boundInetAddress = boundAddress.address().getAddress(); if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { publishPort =
boundAddress.getPort(); @@ -355,7 +362,7 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem // if no matching boundAddress found, check if there is a unique port for all bound addresses if (publishPort < 0) { final IntSet ports = new IntHashSet(); - for (InetSocketTransportAddress boundAddress : boundAddresses) { + for (TransportAddress boundAddress : boundAddresses) { ports.add(boundAddress.getPort()); } if (ports.size() == 1) { @@ -400,33 +407,30 @@ public class Netty3HttpServerTransport extends AbstractLifecycleComponent implem .build(); } - private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) { + private TransportAddress bindAddress(final InetAddress hostAddress) { final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = port.iterate(new PortsRange.PortCallback() { - @Override - public boolean onPortNumber(int portNumber) { - try { - synchronized (serverChannels) { - Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)); - serverChannels.add(channel); - boundSocket.set((InetSocketAddress) channel.getLocalAddress()); - } - } catch (Exception e) { - lastException.set(e); - return false; + boolean success = port.iterate(portNumber -> { + try { + synchronized (serverChannels) { + Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)); + serverChannels.add(channel); + boundSocket.set((InetSocketAddress) channel.getLocalAddress()); } - return true; + } catch (Exception e) { + lastException.set(e); + return false; } + return true; }); if (!success) { - throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get()); + throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get()); } if (logger.isDebugEnabled()) { logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get())); } - return new InetSocketTransportAddress(boundSocket.get()); + return new TransportAddress(boundSocket.get()); } @Override diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java index 03c9671ad78..667d0c60887 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3InternalESLogger.java @@ -23,9 +23,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressLoggerChecks; import org.jboss.netty.logging.AbstractInternalLogger; -/** - * - */ @SuppressLoggerChecks(reason = "safely delegates to logger") final class Netty3InternalESLogger extends AbstractInternalLogger { diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java index 8d1a6edd78b..9d71fec9c90 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Transport.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -93,7 +92,7 @@ public class Netty3Transport extends TcpTransport { public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = @@ -332,7 +331,7 @@ public class Netty3Transport extends TcpTransport { } protected NodeChannels connectToChannelsLight(DiscoveryNode node) { - InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); + InetSocketAddress address = node.getAddress().address(); ChannelFuture connect = clientBootstrap.connect(address); connect.awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); if (!connect.isSuccess()) { @@ -352,7 +351,7 @@ public class Netty3Transport extends TcpTransport { int numConnections = connectionsPerNodeBulk + connectionsPerNodePing + connectionsPerNodeRecovery + connectionsPerNodeReg + connectionsPerNodeState; ArrayList connections = new ArrayList<>(); - InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); + InetSocketAddress address = node.getAddress().address(); for (int i = 0; i < numConnections; i++) { connections.add(clientBootstrap.connect(address)); } @@ -491,19 +490,9 @@ public class Netty3Transport extends TcpTransport { } @Override - protected void sendMessage(Channel channel, BytesReference reference, Runnable sendListener, boolean close) { + protected void sendMessage(Channel channel, BytesReference reference, Runnable sendListener) { final ChannelFuture future = channel.write(Netty3Utils.toChannelBuffer(reference)); - if (close) { - future.addListener(f -> { - try { - sendListener.run(); - } finally { - f.getChannel().close(); - } - }); - } else { - future.addListener(future1 -> sendListener.run()); - } + future.addListener(future1 -> sendListener.run()); } @Override diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java index 2cbf92997b4..f32bd5dc19b 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java @@ -37,8 +37,6 @@ import java.security.AccessController; import java.security.PrivilegedAction; import java.util.ArrayList; -/** - */ public class Netty3Utils { /** @@ -101,7 +99,6 @@ public class Netty3Utils { InternalLoggerFactory.setDefaultFactory(new InternalLoggerFactory() { @Override public InternalLogger newInstance(String name) { - name = name.replace("org.jboss.netty.", "netty3.").replace("org.jboss.netty.", "netty3."); return new Netty3InternalESLogger(Loggers.getLogger(name)); } }); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/ESNetty3IntegTestCase.java b/modules/transport-netty3/src/test/java/org/elasticsearch/ESNetty3IntegTestCase.java index 20570536ae2..09a8641253f 100644 --- 
a/modules/transport-netty3/src/test/java/org/elasticsearch/ESNetty3IntegTestCase.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/ESNetty3IntegTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.transport.netty3.Netty3Transport; import java.util.Arrays; import java.util.Collection; -@ESIntegTestCase.SuppressLocalMode public abstract class ESNetty3IntegTestCase extends ESIntegTestCase { @Override diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java index 9ed1df1cfed..04de8e8eebe 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3ClientYamlTestSuiteIT.java @@ -39,7 +39,7 @@ public class Netty3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpPublishPortTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpPublishPortTests.java index 05c7ee36a24..889cb50e5fe 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpPublishPortTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpPublishPortTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.http.netty3; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.test.ESTestCase; @@ -73,16 +73,16 @@ public class Netty3HttpPublishPortTests extends ESTestCase { } } - private InetSocketTransportAddress address(String host, int port) throws UnknownHostException { - return new InetSocketTransportAddress(getByName(host), port); + private TransportAddress address(String host, int port) throws UnknownHostException { + return new TransportAddress(getByName(host), port); } - private InetSocketTransportAddress randomAddress() throws UnknownHostException { + private TransportAddress randomAddress() throws UnknownHostException { return address("127.0.0." 
+ randomIntBetween(1, 100), randomIntBetween(9200, 9300)); } - private List<InetSocketTransportAddress> randomAddresses() throws UnknownHostException { - List<InetSocketTransportAddress> addresses = new ArrayList<>(); + private List<TransportAddress> randomAddresses() throws UnknownHostException { + List<TransportAddress> addresses = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 5); i++) { addresses.add(randomAddress()); } diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpRequestSizeLimitIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpRequestSizeLimitIT.java index 66d9f2c88d1..d6da2cab9d5 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpRequestSizeLimitIT.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpRequestSizeLimitIT.java @@ -22,7 +22,7 @@ import org.elasticsearch.ESNetty3IntegTestCase; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.http.HttpServerTransport; @@ -78,15 +78,15 @@ public class Netty3HttpRequestSizeLimitIT extends ESNetty3IntegTestCase { } HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress + TransportAddress transportAddress = (TransportAddress) randomFrom(httpServerTransport.boundAddress ().boundAddresses()); try (Netty3HttpClient nettyHttpClient = new Netty3HttpClient()) { - Collection<HttpResponse> singleResponse = nettyHttpClient.post(inetSocketTransportAddress.address(), requests[0]); + Collection<HttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests[0]); assertThat(singleResponse, hasSize(1)); assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK); - Collection<HttpResponse> multipleResponses = nettyHttpClient.post(inetSocketTransportAddress.address(), requests); + Collection<HttpResponse> multipleResponses = nettyHttpClient.post(transportAddress.address(), requests); assertThat(multipleResponses, hasSize(requests.length)); assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.SERVICE_UNAVAILABLE); } @@ -103,11 +103,11 @@ public class Netty3HttpRequestSizeLimitIT extends ESNetty3IntegTestCase { } HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress + TransportAddress transportAddress = (TransportAddress) randomFrom(httpServerTransport.boundAddress ().boundAddresses()); try (Netty3HttpClient nettyHttpClient = new Netty3HttpClient()) { - Collection<HttpResponse> responses = nettyHttpClient.put(inetSocketTransportAddress.address(), requestUris); + Collection<HttpResponse> responses = nettyHttpClient.put(transportAddress.address(), requestUris); assertThat(responses, hasSize(requestUris.length)); assertAllInExpectedStatus(responses, HttpResponseStatus.OK); } diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerPipeliningTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerPipeliningTests.java index
b432708de15..a0f7fe787da 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerPipeliningTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerPipeliningTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.http.netty3; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.netty3.Netty3HttpServerTransport.HttpChannelPipelineFactory; @@ -41,23 +41,24 @@ import org.jboss.netty.channel.SimpleChannelUpstreamHandler; import org.jboss.netty.handler.codec.http.DefaultHttpResponse; import org.jboss.netty.handler.codec.http.HttpRequest; import org.jboss.netty.handler.codec.http.HttpResponse; -import org.jboss.netty.handler.codec.http.QueryStringDecoder; import org.junit.After; import org.junit.Before; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import static org.elasticsearch.http.netty3.Netty3HttpClient.returnHttpResponseBodies; +import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH; import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK; import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1; @@ -95,14 +96,21 @@ public class Netty3HttpServerPipeliningTests extends ESTestCase { .build(); httpServerTransport = new CustomNetty3HttpServerTransport(settings); httpServerTransport.start(); - InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress() - .boundAddresses()); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - List requests = Arrays.asList("/firstfast", "/slow?sleep=500", "/secondfast", "/slow?sleep=1000", "/thirdfast"); + final int numberOfRequests = randomIntBetween(4, 16); + final List requests = new ArrayList<>(numberOfRequests); + for (int i = 0; i < numberOfRequests; i++) { + if (rarely()) { + requests.add("/slow/" + i); + } else { + requests.add("/" + i); + } + } try (Netty3HttpClient nettyHttpClient = new Netty3HttpClient()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{})); Collection responseBodies = returnHttpResponseBodies(responses); - assertThat(responseBodies, contains("/firstfast", "/slow?sleep=500", "/secondfast", "/slow?sleep=1000", "/thirdfast")); + assertThat(responseBodies, contains(requests.toArray())); } } @@ -113,17 +121,37 @@ public class Netty3HttpServerPipeliningTests extends ESTestCase { .build(); httpServerTransport = new CustomNetty3HttpServerTransport(settings); httpServerTransport.start(); - InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress() 
- .boundAddresses()); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + final int numberOfRequests = randomIntBetween(4, 16); + final Set slowIds = new HashSet<>(); + final List requests = new ArrayList<>(numberOfRequests); + for (int i = 0; i < numberOfRequests; i++) { + if (rarely()) { + requests.add("/slow/" + i); + slowIds.add(i); + } else { + requests.add("/" + i); + } + } - List requests = Arrays.asList("/slow?sleep=1000", "/firstfast", "/secondfast", "/thirdfast", "/slow?sleep=500"); try (Netty3HttpClient nettyHttpClient = new Netty3HttpClient()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{})); List responseBodies = new ArrayList<>(returnHttpResponseBodies(responses)); - // we cannot be sure about the order of the fast requests, but the slow ones should have to be last - assertThat(responseBodies, hasSize(5)); - assertThat(responseBodies.get(3), is("/slow?sleep=500")); - assertThat(responseBodies.get(4), is("/slow?sleep=1000")); + // we cannot be sure about the order of the responses, but the slow ones should come last + assertThat(responseBodies, hasSize(numberOfRequests)); + for (int i = 0; i < numberOfRequests - slowIds.size(); i++) { + assertThat(responseBodies.get(i), matches("/\\d+")); + } + + final Set ids = new HashSet<>(); + for (int i = 0; i < slowIds.size(); i++) { + final String response = responseBodies.get(numberOfRequests - slowIds.size() + i); + assertThat(response, matches("/slow/\\d+")); + assertTrue(ids.add(Integer.parseInt(response.split("/")[2]))); + } + + assertThat(ids, equalTo(slowIds)); } } @@ -216,17 +244,15 @@ public class Netty3HttpServerPipeliningTests extends ESTestCase { httpResponse.headers().add(CONTENT_LENGTH, buffer.readableBytes()); httpResponse.setContent(buffer); - QueryStringDecoder decoder = new QueryStringDecoder(request.getUri()); - - final int timeout = request.getUri().startsWith("/slow") && decoder.getParameters().containsKey("sleep") - ? 
Integer.valueOf(decoder.getParameters().get("sleep").get(0)) : 0; - if (timeout > 0) { + final boolean slow = request.getUri().matches("/slow/\\d+"); + if (slow) { try { - Thread.sleep(timeout); + Thread.sleep(scaledRandomIntBetween(500, 1000)); } catch (InterruptedException e1) { - Thread.currentThread().interrupt(); throw new RuntimeException(e1); } + } else { + assert request.getUri().matches("/\\d+"); } if (oue != null) { diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java index d291f76ff38..6ab4dbd709f 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3HttpServerTransportTests.java @@ -22,8 +22,10 @@ package org.elasticsearch.http.netty3; import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.netty3.cors.Netty3CorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -103,4 +105,17 @@ public class Netty3HttpServerTransportTests extends ESTestCase { assertThat(corsConfig.allowedRequestMethods().stream().map(HttpMethod::getName).collect(Collectors.toSet()), equalTo(methods)); transport.close(); } + + public void testBindUnavailableAddress() { + try (Netty3HttpServerTransport transport = new Netty3HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); + try (Netty3HttpServerTransport otherTransport = new Netty3HttpServerTransport(settings, networkService, bigArrays, + threadPool)) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); + assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); + } + } + } } diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3PipeliningDisabledIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3PipeliningDisabledIT.java index 09325e2ed9b..946be263c7a 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3PipeliningDisabledIT.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/Netty3PipeliningDisabledIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.http.netty3; import org.elasticsearch.ESNetty3IntegTestCase; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -37,9 +36,6 @@ import static org.elasticsearch.http.netty3.Netty3HttpClient.returnOpaqueIds; import static org.hamcrest.Matchers.containsInAnyOrder; 
import static org.hamcrest.Matchers.hasSize; -/** - * - */ @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class Netty3PipeliningDisabledIT extends ESNetty3IntegTestCase { @Override @@ -57,10 +53,10 @@ public class Netty3PipeliningDisabledIT extends ESNetty3IntegTestCase { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); try (Netty3HttpClient nettyHttpClient = new Netty3HttpClient()) { - Collection responses = nettyHttpClient.get(inetSocketTransportAddress.address(), requests); + Collection responses = nettyHttpClient.get(transportAddress.address(), requests); assertThat(responses, hasSize(requests.length)); List opaqueIds = new ArrayList<>(returnOpaqueIds(responses)); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/pipelining/HttpPipeliningHandlerTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/pipelining/HttpPipeliningHandlerTests.java index 7db2368d344..a97bd57b5a2 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/pipelining/HttpPipeliningHandlerTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/http/netty3/pipelining/HttpPipeliningHandlerTests.java @@ -68,9 +68,6 @@ import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK; import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1; import static org.jboss.netty.util.CharsetUtil.UTF_8; -/** - * - */ public class HttpPipeliningHandlerTests extends ESTestCase { private static final long RESPONSE_TIMEOUT = 10000L; diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/rest/Netty3HeadBodyIsEmptyIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/rest/Netty3HeadBodyIsEmptyIT.java new file mode 100644 index 00000000000..82e5ff9b666 --- /dev/null +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/rest/Netty3HeadBodyIsEmptyIT.java @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest; + +public class Netty3HeadBodyIsEmptyIT extends HeadBodyIsEmptyIntegTestCase { +} diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java index ba72ade58e7..bb74f9b86a7 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/Netty3SizeHeaderFrameDecoderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.transport; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -68,11 +67,11 @@ public class Netty3SizeHeaderFrameDecoderTests extends ESTestCase { new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService()); nettyTransport.start(); TransportService transportService = new TransportService(settings, nettyTransport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); nettyTransport.transportServiceAdapter(transportService.createAdapter()); TransportAddress[] boundAddresses = nettyTransport.boundAddress().boundAddresses(); - InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); port = transportAddress.address().getPort(); host = transportAddress.address().getAddress(); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java index 7c44fc4d4ea..ac828f6786a 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3ScheduledPingTests.java @@ -66,13 +66,15 @@ public class Netty3ScheduledPingTests extends ESTestCase { NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); final Netty3Transport nettyA = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); serviceA.start(); serviceA.acceptIncomingRequests(); final Netty3Transport nettyB = new Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); 
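+            // null ClusterSettings: this test never updates dynamic settings on the transport service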
serviceB.start(); serviceB.acceptIncomingRequests(); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportMultiPortIntegrationIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportMultiPortIntegrationIT.java index d25951e254c..b5f0fc9f85b 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportMultiPortIntegrationIT.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportMultiPortIntegrationIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -77,7 +76,7 @@ public class Netty3TransportMultiPortIntegrationIT extends ESNetty3IntegTestCase .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); try (TransportClient transportClient = new MockTransportClient(settings, Netty3Plugin.class)) { - transportClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), randomPort)); + transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), randomPort)); ClusterHealthResponse response = transportClient.admin().cluster().prepareHealth().get(); assertThat(response.getStatus(), is(ClusterHealthStatus.GREEN)); } @@ -91,19 +90,19 @@ public class Netty3TransportMultiPortIntegrationIT extends ESNetty3IntegTestCase assertThat(nodeInfo.getTransport().getProfileAddresses(), hasKey("client1")); BoundTransportAddress boundTransportAddress = nodeInfo.getTransport().getProfileAddresses().get("client1"); for (TransportAddress transportAddress : boundTransportAddress.boundAddresses()) { - assertThat(transportAddress, instanceOf(InetSocketTransportAddress.class)); + assertThat(transportAddress, instanceOf(TransportAddress.class)); } // bound addresses for (TransportAddress transportAddress : boundTransportAddress.boundAddresses()) { - assertThat(transportAddress, instanceOf(InetSocketTransportAddress.class)); - assertThat(((InetSocketTransportAddress) transportAddress).address().getPort(), + assertThat(transportAddress, instanceOf(TransportAddress.class)); + assertThat(transportAddress.address().getPort(), is(allOf(greaterThanOrEqualTo(randomPort), lessThanOrEqualTo(randomPort + 10)))); } // publish address - assertThat(boundTransportAddress.publishAddress(), instanceOf(InetSocketTransportAddress.class)); - InetSocketTransportAddress publishAddress = (InetSocketTransportAddress) boundTransportAddress.publishAddress(); + assertThat(boundTransportAddress.publishAddress(), instanceOf(TransportAddress.class)); + TransportAddress publishAddress = boundTransportAddress.publishAddress(); assertThat(NetworkAddress.format(publishAddress.address().getAddress()), is("127.0.0.7")); assertThat(publishAddress.address().getPort(), is(4321)); } diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportPublishAddressIT.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportPublishAddressIT.java index a936ad7d191..e12ac27b2ba 100644 --- 
a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportPublishAddressIT.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/Netty3TransportPublishAddressIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESIntegTestCase; @@ -67,8 +66,8 @@ public class Netty3TransportPublishAddressIT extends ESNetty3IntegTestCase { } else { assertThat(boundTransportAddress.boundAddresses().length, greaterThan(1)); for (TransportAddress boundAddress : boundTransportAddress.boundAddresses()) { - assertThat(boundAddress, instanceOf(InetSocketTransportAddress.class)); - InetSocketTransportAddress inetBoundAddress = (InetSocketTransportAddress) boundAddress; + assertThat(boundAddress, instanceOf(TransportAddress.class)); + TransportAddress inetBoundAddress = (TransportAddress) boundAddress; if (inetBoundAddress.address().getAddress() instanceof Inet4Address) { // IPv4 address is preferred publish address for _local_ assertThat(inetBoundAddress.getPort(), equalTo(boundTransportAddress.publishAddress().getPort())); diff --git a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java index b90b788f904..b7f20df75a5 100644 --- a/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java +++ b/modules/transport-netty3/src/test/java/org/elasticsearch/transport/netty3/SimpleNetty3TransportTests.java @@ -23,13 +23,16 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -45,9 +48,8 @@ import static org.hamcrest.Matchers.containsString; public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase { - public static MockTransportService nettyFromThreadPool( - Settings settings, - ThreadPool threadPool, final Version version) { + public static MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + ClusterSettings clusterSettings) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Transport transport = new 
Netty3Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @@ -56,20 +58,21 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase return version; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + clusterSettings); } @Override - protected MockTransportService build(Settings settings, Version version) { + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings) { settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build(); - MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version); + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings); transportService.start(); return transportService; } public void testConnectException() throws UnknownHostException { try { - serviceA.connectToNode(new DiscoveryNode("C", new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9876), + serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), emptyMap(), emptySet(),Version.CURRENT)); fail("Expected ConnectTransportException"); } catch (ConnectTransportException e) { @@ -77,4 +80,26 @@ public class SimpleNetty3TransportTests extends AbstractSimpleTransportTestCase assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); } } + + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); + } } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 4fc38bc3947..4cd80e4a231 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -33,13 +33,13 @@ compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-tr dependencies { // network stack - compile "io.netty:netty-buffer:4.1.5.Final" - compile "io.netty:netty-codec:4.1.5.Final" - compile "io.netty:netty-codec-http:4.1.5.Final" - compile "io.netty:netty-common:4.1.5.Final" - compile "io.netty:netty-handler:4.1.5.Final" - compile "io.netty:netty-resolver:4.1.5.Final" - compile "io.netty:netty-transport:4.1.5.Final" + compile "io.netty:netty-buffer:4.1.6.Final" + compile "io.netty:netty-codec:4.1.6.Final" + compile 
"io.netty:netty-codec-http:4.1.6.Final" + compile "io.netty:netty-common:4.1.6.Final" + compile "io.netty:netty-handler:4.1.6.Final" + compile "io.netty:netty-resolver:4.1.6.Final" + compile "io.netty:netty-transport:4.1.6.Final" } integTest { diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha1 deleted file mode 100644 index e64426f033a..00000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b5fb6bccda4d63d4a74c9faccdf32f77ab66abc1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..a1b16cc76e9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +dc1a304ee7c448bfa241f60c41948db6f12b653b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha1 deleted file mode 100644 index 66484a93047..00000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66bbf9324fa36467d041083f89328e2a24ec4f67 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..45cf352fc1b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +086ce3c559af6759a0b991b9ad2faec20981e18e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha1 deleted file mode 100644 index 93a445416a5..00000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -087bda1b9ec7e3f75ca721fc87735cbedad2aa1a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..31ba47f6a30 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +ee6c65cb3f39f212241e9131dc05a837e776fd70 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha1 deleted file mode 100644 index 95da96aaf80..00000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -607f8433d8782445e72abe34e43a7e57e86a5e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..1e2bbb9656e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +654e4d8e87bc442be5c1d82d0369a3885f7168ea \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha1 deleted file mode 100644 index 7aadd85058e..00000000000 --- 
a/modules/transport-netty4/licenses/netty-handler-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6262900ee9487e62560030a136160df953b1cd6b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..9cfb2d9d0d0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +a24e92d67e467a3b97ff63642ab56637990685fd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha1 deleted file mode 100644 index 060655014a3..00000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f367bedcdc185a727fda3296b9a18014cdc22c4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..67c8474f268 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +2a1c97445b6aa544cd029faa72abfb3f85e9957e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha1 deleted file mode 100644 index 0e7dc27daaf..00000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37126b370722ff9631ee13c91139aacec0a71d1d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.6.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.6.Final.jar.sha1 new file mode 100644 index 00000000000..7107502b453 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.6.Final.jar.sha1 @@ -0,0 +1 @@ +4cba91ad2711c13a063d480f01edf70aa3f9387d \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 62443dc541e..efcc8d1f2ea 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -20,6 +20,7 @@ package org.elasticsearch.http.netty4; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; import io.netty.channel.Channel; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelPromise; @@ -29,6 +30,7 @@ import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; @@ -87,13 +89,17 @@ final class Netty4HttpChannel extends AbstractRestChannel { return new ReleasableBytesStreamOutput(transport.bigArrays); } - @Override public void sendResponse(RestResponse response) { // if the response object was created upstream, then use it; // otherwise, create a new one ByteBuf buffer = Netty4Utils.toByteBuf(response.content()); - 
FullHttpResponse resp = newResponse(buffer); + final FullHttpResponse resp; + if (HttpMethod.HEAD.equals(nettyRequest.method())) { + resp = newResponse(Unpooled.EMPTY_BUFFER); + } else { + resp = newResponse(buffer); + } resp.setStatus(getStatus(response.status())); Netty4CorsHandler.setCorsResponseHeaders(nettyRequest, resp, transport.getCorsConfig()); @@ -185,8 +191,8 @@ final class Netty4HttpChannel extends AbstractRestChannel { // Determine if the request connection should be closed on completion. private boolean isCloseConnection() { final boolean http10 = isHttp10(); - return HttpHeaderValues.CLOSE.equals(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) || - (http10 && HttpHeaderValues.KEEP_ALIVE.equals(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) == false); + return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) || + (http10 && !HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION))); } // Create a new {@link HttpResponse} to transmit the response for the netty request. diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index cd0a208d2e6..20cdfe0a128 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -53,10 +53,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.NetworkExceptionHelper; import org.elasticsearch.common.transport.PortsRange; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -128,7 +127,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_TCP_NO_DELAY = @@ -286,40 +285,50 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem @Override protected void doStart() { - this.serverOpenChannels = new Netty4OpenChannelsHandler(logger); + boolean success = false; + try { + this.serverOpenChannels = new Netty4OpenChannelsHandler(logger); - serverBootstrap = new ServerBootstrap(); - if (blockingServer) { - serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); - serverBootstrap.channel(OioServerSocketChannel.class); - } else { - serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, 
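
Two behavioral fixes land in `Netty4HttpChannel` above: responses to `HEAD` requests are built from `Unpooled.EMPTY_BUFFER` so they carry status and headers but no payload (as RFC 7231 requires), and the `Connection` header comparison switches to `AsciiString.contentEqualsIgnoreCase`, since clients may send `Close`/`Keep-Alive` in any letter case. A self-contained sketch of both rules with plain Netty 4.1 types (class and method names here are illustrative):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

final class HeadAwareResponses {
    // HEAD responses must mirror the GET response metadata without the body.
    static FullHttpResponse respond(FullHttpRequest request, ByteBuf body) {
        ByteBuf payload = HttpMethod.HEAD.equals(request.method()) ? Unpooled.EMPTY_BUFFER : body;
        return new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, payload);
    }

    // Case-insensitive, null-safe header check; a missing header yields false.
    static boolean requestsClose(FullHttpRequest request) {
        return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(request.headers().get(HttpHeaderNames.CONNECTION));
    }
}
```
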
HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); - serverBootstrap.channel(NioServerSocketChannel.class); + serverBootstrap = new ServerBootstrap(); + if (blockingServer) { + serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings, + HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); + serverBootstrap.channel(OioServerSocketChannel.class); + } else { + serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, + HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); + serverBootstrap.channel(NioServerSocketChannel.class); + } + + serverBootstrap.childHandler(configureServerChannelHandler()); + + serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)); + serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); + + final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + + final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); + serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); + + this.boundAddress = createBoundHttpAddress(); + success = true; + } finally { + if (success == false) { + doStop(); // otherwise we leak threads since we never moved to started + } } - - serverBootstrap.childHandler(configureServerChannelHandler()); - - serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)); - serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); - - final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); - if (tcpSendBufferSize.getBytes() > 0) { - serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); - } - - final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); - if (tcpReceiveBufferSize.getBytes() > 0) { - serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); - } - - serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - - final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); - serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); - serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); - - this.boundAddress = createBoundHttpAddress(); } private BoundTransportAddress createBoundHttpAddress() { @@ -331,7 +340,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem throw new BindHttpException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); } - List boundAddresses = new ArrayList<>(hostAddresses.length); + List boundAddresses = new ArrayList<>(hostAddresses.length); for (InetAddress address : hostAddresses) { 
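
The `doStart()` rewrite above is a resource-safety fix rather than a behavior change: every statement of the old method is kept, but wrapped so that a failure partway through (for example a bind failure in `createBoundHttpAddress()`) triggers `doStop()` and releases the event-loop threads that were already created. The idiom generalizes; a minimal sketch, assuming an abstract lifecycle component:

```java
abstract class SafeStartComponent {
    // Start-or-clean-up: if any step throws before 'success' flips to true,
    // the finally block tears down whatever was already acquired. Without
    // this, a failed start leaks threads because the component never reaches
    // the started state and doStop() is never invoked by the lifecycle.
    final void start() {
        boolean success = false;
        try {
            acquireResources(); // e.g. create event loops, bind the server channel
            success = true;
        } finally {
            if (success == false) {
                stop();
            }
        }
    }

    abstract void acquireResources();

    abstract void stop();
}
```
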
boundAddresses.add(bindAddress(address)); } @@ -345,15 +354,15 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort); - return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), new InetSocketTransportAddress(publishAddress)); + return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), new TransportAddress(publishAddress)); } // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { + static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); if (publishPort < 0) { - for (InetSocketTransportAddress boundAddress : boundAddresses) { + for (TransportAddress boundAddress : boundAddresses) { InetAddress boundInetAddress = boundAddress.address().getAddress(); if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { publishPort = boundAddress.getPort(); @@ -365,7 +374,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem // if no matching boundAddress found, check if there is a unique port for all bound addresses if (publishPort < 0) { final IntSet ports = new IntHashSet(); - for (InetSocketTransportAddress boundAddress : boundAddresses) { + for (TransportAddress boundAddress : boundAddresses) { ports.add(boundAddress.getPort()); } if (ports.size() == 1) { @@ -415,33 +424,30 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem .build(); } - private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) { + private TransportAddress bindAddress(final InetAddress hostAddress) { final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = port.iterate(new PortsRange.PortCallback() { - @Override - public boolean onPortNumber(int portNumber) { - try { - synchronized (serverChannels) { - ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync(); - serverChannels.add(future.channel()); - boundSocket.set((InetSocketAddress) future.channel().localAddress()); - } - } catch (Exception e) { - lastException.set(e); - return false; + boolean success = port.iterate(portNumber -> { + try { + synchronized (serverChannels) { + ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync(); + serverChannels.add(future.channel()); + boundSocket.set((InetSocketAddress) future.channel().localAddress()); } - return true; + } catch (Exception e) { + lastException.set(e); + return false; } + return true; }); if (!success) { - throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get()); + throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get()); } if (logger.isDebugEnabled()) { logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get())); } - return new InetSocketTransportAddress(boundSocket.get()); + return new TransportAddress(boundSocket.get()); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java 
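
`resolvePublishPort` is unchanged in logic above, only retyped from `InetSocketTransportAddress` to `TransportAddress`. It picks the port to advertise in two passes: prefer a bound address that matches the publish host or a wildcard bind, otherwise fall back to a port shared by every bound address. A standalone sketch of that selection, assuming plain JDK types:

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class PublishPortSketch {
    static int resolve(List<InetSocketAddress> bound, InetAddress publishHost) {
        // Pass 1: a wildcard bind or a bind on the publish host decides the port.
        for (InetSocketAddress address : bound) {
            InetAddress boundHost = address.getAddress();
            if (boundHost.isAnyLocalAddress() || boundHost.equals(publishHost)) {
                return address.getPort();
            }
        }
        // Pass 2: if every bound address shares one port, that port is unambiguous.
        Set<Integer> ports = new HashSet<>();
        for (InetSocketAddress address : bound) {
            ports.add(address.getPort());
        }
        if (ports.size() == 1) {
            return ports.iterator().next();
        }
        throw new IllegalStateException("no unique publish port among " + bound);
    }
}
```

The related `bindAddress` cleanup replaces the anonymous `PortsRange.PortCallback` with a lambda and reports `port.getPortRangeString()` in the `BindHttpException`, so the error message renders the attempted port range explicitly.
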
b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 808592c58f0..77429788317 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -54,7 +54,6 @@ import org.elasticsearch.common.network.NetworkService.TcpSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -96,7 +95,7 @@ public class Netty4Transport extends TcpTransport { public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope, Property.Shared); public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = @@ -333,7 +332,7 @@ public class Netty4Transport extends TcpTransport { } protected NodeChannels connectToChannelsLight(DiscoveryNode node) { - InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); + InetSocketAddress address = node.getAddress().address(); ChannelFuture connect = bootstrap.connect(address); connect.awaitUninterruptibly((long) (connectTimeout.millis() * 1.5)); if (!connect.isSuccess()) { @@ -364,7 +363,7 @@ public class Netty4Transport extends TcpTransport { connectionsPerNodeState + connectionsPerNodeRecovery; final ArrayList connections = new ArrayList<>(numConnections); - final InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); + final InetSocketAddress address = node.getAddress().address(); for (int i = 0; i < numConnections; i++) { connections.add(bootstrap.connect(address)); } @@ -445,19 +444,9 @@ public class Netty4Transport extends TcpTransport { } @Override - protected void sendMessage(Channel channel, BytesReference reference, Runnable sendListener, boolean close) { + protected void sendMessage(Channel channel, BytesReference reference, Runnable sendListener) { final ChannelFuture future = channel.writeAndFlush(Netty4Utils.toByteBuf(reference)); - if (close) { - future.addListener(f -> { - try { - sendListener.run(); - } finally { - future.channel().close(); - } - }); - } else { - future.addListener(f -> sendListener.run()); - } + future.addListener(f -> sendListener.run()); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 53cf1b329aa..877d50e1674 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -41,7 +41,7 @@ public class Netty4Utils { @Override public InternalLogger newInstance(final String name) { - return new Netty4InternalESLogger(name.replace("io.netty.", "netty.")); + return new Netty4InternalESLogger(name); } }); diff --git a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy 
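
`sendMessage` above loses its `close` parameter: the channel is no longer closed from inside the send path, so the method's only job is to flush and notify. A sketch of the resulting shape, assuming plain Netty types:

```java
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;

final class SendSketch {
    // Write, flush, and run the completion callback. Closing the channel is
    // the caller's concern now, which keeps this method single-purpose and
    // removes the branch that previously chained close() onto the future.
    static void send(Channel channel, Object message, Runnable sendListener) {
        ChannelFuture future = channel.writeAndFlush(message);
        future.addListener(f -> sendListener.run());
    }
}
```

In the same file set, the `Netty4Utils` change stops rewriting `io.netty.` logger names to `netty.`, so Netty loggers keep their real package names.
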
b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy index a8cd1a7fffd..902bfdee231 100644 --- a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy +++ b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy @@ -17,7 +17,7 @@ * under the License. */ -grant codeBase "${codebase.netty-transport-4.1.5.Final.jar}" { +grant codeBase "${codebase.netty-transport-4.1.6.Final.jar}" { // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java index 54fc6b66539..b38cda76c69 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.transport.netty4.Netty4Transport; import java.util.Arrays; import java.util.Collection; -@ESIntegTestCase.SuppressLocalMode public abstract class ESNetty4IntegTestCase extends ESIntegTestCase { @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4ClientYamlTestSuiteIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4ClientYamlTestSuiteIT.java index 8f7483e2791..237227cd4df 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4ClientYamlTestSuiteIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4ClientYamlTestSuiteIT.java @@ -40,7 +40,7 @@ public class Netty4ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index 1185419d0dd..a5e0381b3fd 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -30,10 +30,12 @@ import io.netty.channel.ChannelPipeline; import io.netty.channel.ChannelProgressivePromise; import io.netty.channel.ChannelPromise; import io.netty.channel.EventLoop; +import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpVersion; @@ -212,6 +214,37 @@ public class Netty4HttpChannelTests extends ESTestCase { } } + public void testConnectionClose() throws Exception { + final Settings settings = Settings.builder().build(); + try (Netty4HttpServerTransport httpServerTransport = + new Netty4HttpServerTransport(settings, 
networkService, bigArrays, threadPool)) { + httpServerTransport.start(); + final FullHttpRequest httpRequest; + final boolean close = randomBoolean(); + if (randomBoolean()) { + httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (close) { + httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); + } + } else { + httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/"); + if (!close) { + httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE); + } + } + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); + final Netty4HttpRequest request = new Netty4HttpRequest(httpRequest, embeddedChannel); + + // send a response, the channel close status should match + assertTrue(embeddedChannel.isOpen()); + final Netty4HttpChannel channel = + new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + final TestResponse resp = new TestResponse(); + channel.sendResponse(resp); + assertThat(embeddedChannel.isOpen(), equalTo(!close)); + } + } + private FullHttpResponse executeRequest(final Settings settings, final String host) { return executeRequest(settings, null, host); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPublishPortTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPublishPortTests.java index 03e09f28d25..afa513275ea 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPublishPortTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPublishPortTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.http.netty4; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.test.ESTestCase; @@ -73,16 +73,16 @@ public class Netty4HttpPublishPortTests extends ESTestCase { } } - private InetSocketTransportAddress address(String host, int port) throws UnknownHostException { - return new InetSocketTransportAddress(getByName(host), port); + private TransportAddress address(String host, int port) throws UnknownHostException { + return new TransportAddress(getByName(host), port); } - private InetSocketTransportAddress randomAddress() throws UnknownHostException { + private TransportAddress randomAddress() throws UnknownHostException { return address("127.0.0." 
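
`testConnectionClose` above exercises the HTTP keep-alive matrix without opening a socket by using Netty's `EmbeddedChannel`: HTTP/1.1 defaults to keep-alive unless `Connection: close` is sent, while HTTP/1.0 defaults to close unless `Connection: keep-alive` is sent. A compact sketch of the decision being tested, runnable against plain Netty 4.1 (names are illustrative):

```java
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpVersion;

final class ConnectionCloseSketch {
    static boolean shouldClose(FullHttpRequest request) {
        String connection = request.headers().get(HttpHeaderNames.CONNECTION);
        boolean http10 = HttpVersion.HTTP_1_0.equals(request.protocolVersion());
        return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(connection)
                || (http10 && !HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(connection));
    }

    public static void main(String[] args) {
        // EmbeddedChannel is an in-memory channel: no ports, no event loop threads.
        EmbeddedChannel channel = new EmbeddedChannel();
        FullHttpRequest http10 = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/");
        System.out.println("open=" + channel.isOpen() + ", close=" + shouldClose(http10)); // open=true, close=true
    }
}
```
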
+ randomIntBetween(1, 100), randomIntBetween(9200, 9300)); } - private List randomAddresses() throws UnknownHostException { - List addresses = new ArrayList<>(); + private List randomAddresses() throws UnknownHostException { + List addresses = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 5); i++) { addresses.add(randomAddress()); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index f1e2e922cbd..0b8b347e30f 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -24,15 +24,13 @@ import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; import java.util.Collection; @@ -82,15 +80,15 @@ public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { } HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress + TransportAddress transportAddress = (TransportAddress) randomFrom(httpServerTransport.boundAddress ().boundAddresses()); try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection singleResponse = nettyHttpClient.post(inetSocketTransportAddress.address(), requests[0]); + Collection singleResponse = nettyHttpClient.post(transportAddress.address(), requests[0]); assertThat(singleResponse, hasSize(1)); assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK); - Collection multipleResponses = nettyHttpClient.post(inetSocketTransportAddress.address(), requests); + Collection multipleResponses = nettyHttpClient.post(transportAddress.address(), requests); assertThat(multipleResponses, hasSize(requests.length)); assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.SERVICE_UNAVAILABLE); } @@ -107,11 +105,11 @@ public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { } HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress + TransportAddress transportAddress = (TransportAddress) randomFrom(httpServerTransport.boundAddress ().boundAddresses()); try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection responses = nettyHttpClient.put(inetSocketTransportAddress.address(), requestUris); + Collection responses = 
nettyHttpClient.put(transportAddress.address(), requestUris); assertThat(responses, hasSize(requestUris.length)); assertAllInExpectedStatus(responses, HttpResponseStatus.OK); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 63e35a786c2..701baf80aed 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -33,7 +33,7 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpServerTransport; @@ -89,8 +89,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase { .build(); try (final HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) { httpServerTransport.start(); - final InetSocketTransportAddress transportAddress = - (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses()); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); final int numberOfRequests = randomIntBetween(4, 16); final List requests = new ArrayList<>(numberOfRequests); @@ -117,18 +116,15 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase { .build(); try (final HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) { httpServerTransport.start(); - final InetSocketTransportAddress transportAddress = - (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses()); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); final int numberOfRequests = randomIntBetween(4, 16); final Set slowIds = new HashSet<>(); final List requests = new ArrayList<>(numberOfRequests); - int numberOfSlowRequests = 0; for (int i = 0; i < numberOfRequests; i++) { if (rarely()) { requests.add("/slow/" + i); slowIds.add(i); - numberOfSlowRequests++; } else { requests.add("/" + i); } @@ -137,16 +133,15 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase { try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{})); List responseBodies = new ArrayList<>(Netty4HttpClient.returnHttpResponseBodies(responses)); - // we can not be sure about the order of the responses, but the slow ones should - // come last + // we can not be sure about the order of the responses, but the slow ones should come last assertThat(responseBodies, hasSize(numberOfRequests)); - for (int i = 0; i < numberOfRequests - numberOfSlowRequests; i++) { + for (int i = 0; i < numberOfRequests - slowIds.size(); i++) { assertThat(responseBodies.get(i), matches("/\\d+")); } final Set ids = new HashSet<>(); - for (int i = 0; i < numberOfSlowRequests; i++) { - final String response = responseBodies.get(numberOfRequests - 
numberOfSlowRequests + i); + for (int i = 0; i < slowIds.size(); i++) { + final String response = responseBodies.get(numberOfRequests - slowIds.size() + i); assertThat(response, matches("/slow/\\d+" )); assertTrue(ids.add(Integer.parseInt(response.split("/")[2]))); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index faea5c8ec18..498daf63226 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -32,8 +32,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.BytesRestResponse; @@ -123,7 +124,7 @@ public class Netty4HttpServerTransportTests extends ESTestCase { transport.httpServerAdapter((request, channel, context) -> channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done")))); transport.start(); - InetSocketTransportAddress remoteAddress = (InetSocketTransportAddress) randomFrom(transport.boundAddress().boundAddresses()); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (Netty4HttpClient client = new Netty4HttpClient()) { FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); @@ -140,4 +141,17 @@ public class Netty4HttpServerTransportTests extends ESTestCase { } } } + + public void testBindUnavailableAddress() { + try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool)) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); + try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays, + threadPool)) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); + assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); + } + } + } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java index 4ea46c651fc..9f117d4ee21 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java @@ -22,7 +22,6 @@ import io.netty.handler.codec.http.FullHttpResponse; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.common.network.NetworkModule; import 
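
In the pipelining test above, the redundant `numberOfSlowRequests` counter is dropped in favor of `slowIds.size()`, so the slow-request count can no longer drift from the set that actually tracks the slow ids. The new `Netty4HttpServerTransportTests#testBindUnavailableAddress` then applies the bind-conflict check at the HTTP layer: start one transport, pin a second to the same port via `http.port`, and assert the `BindHttpException` message. Both bind tests lean on `expectThrows`, which runs a body, verifies the exception type, and returns the exception for message assertions. A stripped-down sketch of such a helper (an illustration, not the `ESTestCase` implementation):

```java
final class ExpectThrowsSketch {
    interface ThrowingRunnable {
        void run() throws Throwable;
    }

    static <T extends Throwable> T expectThrows(Class<T> expected, ThrowingRunnable body) {
        try {
            body.run();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                return expected.cast(t); // hand it back so callers can assert on the message
            }
            throw new AssertionError("unexpected exception type: " + t, t);
        }
        throw new AssertionError("expected " + expected.getSimpleName() + " but nothing was thrown");
    }
}
```
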
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -54,10 +53,10 @@ public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection responses = nettyHttpClient.get(inetSocketTransportAddress.address(), requests); + Collection responses = nettyHttpClient.get(transportAddress.address(), requests); assertThat(responses, hasSize(requests.length)); List opaqueIds = new ArrayList<>(Netty4HttpClient.returnOpaqueIds(responses)); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java index c2222562d72..cc3f22be453 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java @@ -23,10 +23,8 @@ import io.netty.handler.codec.http.FullHttpResponse; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -53,10 +51,10 @@ public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); - InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection responses = nettyHttpClient.get(inetSocketTransportAddress.address(), requests); + Collection responses = nettyHttpClient.get(transportAddress.address(), requests); assertThat(responses, hasSize(5)); Collection opaqueIds = Netty4HttpClient.returnOpaqueIds(responses); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java new file mode 100644 index 00000000000..8716f59ee00 --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest; + +public class Netty4HeadBodyIsEmptyIT extends HeadBodyIsEmptyIntegTestCase { +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index 0b8d5fb6a35..2786077d084 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -66,13 +66,15 @@ public class Netty4ScheduledPingTests extends ESTestCase { NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); serviceA.start(); serviceA.acceptIncomingRequests(); final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); - MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); serviceB.start(); serviceB.acceptIncomingRequests(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 165b74431e1..0e9ebe5f7f4 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.transport.netty4; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -69,7 +68,7 @@ public class 
Netty4SizeHeaderFrameDecoderTests extends ESTestCase { nettyTransport.start(); TransportAddress[] boundAddresses = nettyTransport.boundAddress().boundAddresses(); - InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); port = transportAddress.address().getPort(); host = transportAddress.address().getAddress(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java index 32268d1f5d8..bfd03ff75c6 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -77,7 +76,7 @@ public class Netty4TransportMultiPortIntegrationIT extends ESNetty4IntegTestCase .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); try (TransportClient transportClient = new MockTransportClient(settings, Netty4Plugin.class)) { - transportClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), randomPort)); + transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), randomPort)); ClusterHealthResponse response = transportClient.admin().cluster().prepareHealth().get(); assertThat(response.getStatus(), is(ClusterHealthStatus.GREEN)); } @@ -91,19 +90,19 @@ public class Netty4TransportMultiPortIntegrationIT extends ESNetty4IntegTestCase assertThat(nodeInfo.getTransport().getProfileAddresses(), hasKey("client1")); BoundTransportAddress boundTransportAddress = nodeInfo.getTransport().getProfileAddresses().get("client1"); for (TransportAddress transportAddress : boundTransportAddress.boundAddresses()) { - assertThat(transportAddress, instanceOf(InetSocketTransportAddress.class)); + assertThat(transportAddress, instanceOf(TransportAddress.class)); } // bound addresses for (TransportAddress transportAddress : boundTransportAddress.boundAddresses()) { - assertThat(transportAddress, instanceOf(InetSocketTransportAddress.class)); - assertThat(((InetSocketTransportAddress) transportAddress).address().getPort(), + assertThat(transportAddress, instanceOf(TransportAddress.class)); + assertThat(transportAddress.address().getPort(), is(allOf(greaterThanOrEqualTo(randomPort), lessThanOrEqualTo(randomPort + 10)))); } // publish address - assertThat(boundTransportAddress.publishAddress(), instanceOf(InetSocketTransportAddress.class)); - InetSocketTransportAddress publishAddress = (InetSocketTransportAddress) boundTransportAddress.publishAddress(); + assertThat(boundTransportAddress.publishAddress(), instanceOf(TransportAddress.class)); + TransportAddress publishAddress = boundTransportAddress.publishAddress(); 
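
The long tail of test churn above (`Netty4SizeHeaderFrameDecoderTests`, `Netty4TransportMultiPortIntegrationIT`, and friends) is mechanical fallout from collapsing `InetSocketTransportAddress` into `TransportAddress`: one concrete address type wrapping an `InetSocketAddress`, so the `instanceOf` checks and casts become unnecessary. A hypothetical mirror of the resulting shape, using only JDK types:

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;

final class AddressSketch {
    // One concrete transport address backed directly by InetSocketAddress.
    static final class TransportAddress {
        private final InetSocketAddress address;

        TransportAddress(InetAddress host, int port) {
            this.address = new InetSocketAddress(host, port);
        }

        InetSocketAddress address() {
            return address;
        }

        int getPort() {
            return address.getPort();
        }
    }

    public static void main(String[] args) throws Exception {
        TransportAddress publish = new TransportAddress(InetAddress.getByName("127.0.0.1"), 4321);
        // No cast needed to reach the socket-level details.
        System.out.println(publish.address().getAddress().getHostAddress() + ":" + publish.getPort());
    }
}
```
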
assertThat(NetworkAddress.format(publishAddress.address().getAddress()), is("127.0.0.7")); assertThat(publishAddress.address().getPort(), is(4321)); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java index 6a6f7bee80e..922031d3c3d 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportPublishAddressIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.Netty4Plugin; @@ -76,8 +75,8 @@ public class Netty4TransportPublishAddressIT extends ESNetty4IntegTestCase { } else { assertThat(boundTransportAddress.boundAddresses().length, greaterThan(1)); for (TransportAddress boundAddress : boundTransportAddress.boundAddresses()) { - assertThat(boundAddress, instanceOf(InetSocketTransportAddress.class)); - InetSocketTransportAddress inetBoundAddress = (InetSocketTransportAddress) boundAddress; + assertThat(boundAddress, instanceOf(TransportAddress.class)); + TransportAddress inetBoundAddress = (TransportAddress) boundAddress; if (inetBoundAddress.address().getAddress() instanceof Inet4Address) { // IPv4 address is preferred publish address for _local_ assertThat(inetBoundAddress.getPort(), equalTo(boundTransportAddress.publishAddress().getPort())); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 3a3a4587cac..a7a674007ba 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -23,13 +23,16 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -45,9 +48,8 @@ import static org.hamcrest.Matchers.containsString; public class SimpleNetty4TransportTests extends 
AbstractSimpleTransportTestCase { - public static MockTransportService nettyFromThreadPool( - Settings settings, - ThreadPool threadPool, final Version version) { + public static MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + ClusterSettings clusterSettings) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Transport transport = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @@ -56,20 +58,21 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase return version; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + clusterSettings); } @Override - protected MockTransportService build(Settings settings, Version version) { + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings) { settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build(); - MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version); + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings); transportService.start(); return transportService; } public void testConnectException() throws UnknownHostException { try { - serviceA.connectToNode(new DiscoveryNode("C", new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9876), + serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), emptyMap(), emptySet(),Version.CURRENT)); fail("Expected ConnectTransportException"); } catch (ConnectTransportException e) { @@ -78,4 +81,26 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase } } + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); + } + } diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 deleted file mode 100644 index 2a734f79a3f..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68de5f298090b92aa9a803eb4f5aed0c9104e685 \ No newline at end of file diff --git 
a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..29114cfcf70 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +39e5761c8209a6e4e940a3aec4ba57a6b631ca00 \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java index 9cc42e726a5..3adced6ffab 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; - -/** - */ public class IcuTransformTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { private final String id; diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuClientYamlTestSuiteIT.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuClientYamlTestSuiteIT.java index 47224836037..ce2e660ecfc 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuClientYamlTestSuiteIT.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class IcuClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java index 716d07385b8..ffc6cab6a25 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuTokenizerFactoryTests.java @@ -37,8 +37,6 @@ import java.nio.file.Path; import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; -/** - */ public class IcuTokenizerFactoryTests extends ESTestCase { public void testSimpleIcuTokenizer() throws IOException { diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java index 86338e0670d..3cd675c221a 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java @@ -27,8 +27,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import static org.hamcrest.Matchers.instanceOf; -/** - */ + public class SimpleIcuAnalysisTests extends ESTestCase { public void testDefaultsIcuAnalysis() throws IOException { TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisICUPlugin()); diff --git 
a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml index 64fbbcadf7d..180f6c6f5b6 100644 --- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml +++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yaml @@ -3,8 +3,9 @@ "Tokenizer": - do: indices.analyze: - text: Foo Bar - tokenizer: icu_tokenizer + body: + text: Foo Bar + tokenizer: icu_tokenizer - length: { tokens: 2 } - match: { tokens.0.token: Foo } - match: { tokens.1.token: Bar } @@ -12,26 +13,29 @@ "Normalization filter": - do: indices.analyze: - filter: icu_normalizer - text: Foo Bar Ruß - tokenizer: keyword + body: + filter: [icu_normalizer] + text: Foo Bar Ruß + tokenizer: keyword - length: { tokens: 1 } - match: { tokens.0.token: foo bar russ } --- "Normalization charfilter": - do: indices.analyze: - char_filter: icu_normalizer - text: Foo Bar Ruß - tokenizer: keyword + body: + char_filter: [icu_normalizer] + text: Foo Bar Ruß + tokenizer: keyword - length: { tokens: 1 } - match: { tokens.0.token: foo bar russ } --- "Folding filter": - do: indices.analyze: - filter: icu_folding - text: Foo Bar résumé - tokenizer: keyword + body: + filter: [icu_folding] + text: Foo Bar résumé + tokenizer: keyword - length: { tokens: 1 } - match: { tokens.0.token: foo bar resume } diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 deleted file mode 100644 index 749cb8ecde8..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17ee76df332c0342a172790472b777086487a299 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..2ec23fb8b2d --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +c4230c40a10cbb4ad54bcbe9e4265ecb598a4c25 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java index 0c6ab2d3ea6..1776977c8e2 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java @@ -29,8 +29,6 @@ import org.elasticsearch.index.IndexSettings; import java.util.Set; -/** - */ public class KuromojiAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final JapaneseAnalyzer analyzer; diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java index 9e41621525a..2f00e68a75e 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java @@ -31,8 +31,6 @@ import org.elasticsearch.index.IndexSettings; import java.io.IOException; 
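
The YAML suites above stop passing `text`, `tokenizer`, `filter`, and `char_filter` as URL parameters and nest them under `body:`, matching the `_analyze` API's move to body-based options (filters also become arrays). Over raw HTTP the new calls amount to a JSON request body; a sketch using only the JDK (host and port are illustrative):

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

final class AnalyzeBodySketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9200/_analyze");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("POST");
        connection.setRequestProperty("Content-Type", "application/json");
        connection.setDoOutput(true);
        // Equivalent of: body: { text: "Foo Bar", tokenizer: icu_tokenizer }
        byte[] body = "{\"tokenizer\":\"icu_tokenizer\",\"text\":\"Foo Bar\"}"
                .getBytes(StandardCharsets.UTF_8);
        try (OutputStream out = connection.getOutputStream()) {
            out.write(body);
        }
        System.out.println("HTTP " + connection.getResponseCode());
    }
}
```
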
import java.io.Reader; -/** - */ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { private static final String USER_DICT_OPTION = "user_dictionary"; diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java index 2da9416fbdc..c0271c99784 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java @@ -44,8 +44,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; -/** - */ public class KuromojiAnalysisTests extends ESTestCase { public void testDefaultsKuromojiAnalysis() throws IOException { TestAnalysis analysis = createTestAnalysis(); diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiClientYamlTestSuiteIT.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiClientYamlTestSuiteIT.java index e99c5c2bacf..0797b10d774 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiClientYamlTestSuiteIT.java +++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class KuromojiClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml index 42df558567d..1cca2b728e0 100644 --- a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml +++ b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yaml @@ -4,8 +4,9 @@ "Analyzer": - do: indices.analyze: - text: JR新宿駅の近くにビールを飲みに行こうか - analyzer: kuromoji + body: + text: JR新宿駅の近くにビールを飲みに行こうか + analyzer: kuromoji - length: { tokens: 7 } - match: { tokens.0.token: jr } - match: { tokens.1.token: 新宿 } @@ -18,8 +19,9 @@ "Tokenizer": - do: indices.analyze: - text: 関西国際空港 - tokenizer: kuromoji_tokenizer + body: + text: 関西国際空港 + tokenizer: kuromoji_tokenizer - length: { tokens: 4 } - match: { tokens.0.token: 関西 } - match: { tokens.1.token: 関西国際空港 } @@ -29,26 +31,29 @@ "Baseform filter": - do: indices.analyze: - text: 飲み - tokenizer: kuromoji_tokenizer - filter: kuromoji_baseform + body: + text: 飲み + tokenizer: kuromoji_tokenizer + filter: [kuromoji_baseform] - length: { tokens: 1 } - match: { tokens.0.token: 飲む } --- "Reading filter": - do: indices.analyze: - text: 寿司 - tokenizer: kuromoji_tokenizer - filter: kuromoji_readingform + body: + text: 寿司 + tokenizer: kuromoji_tokenizer + filter: [kuromoji_readingform] - length: { tokens: 1 } - match: { tokens.0.token: スシ } --- "Stemming filter": - do: indices.analyze: - text: サーバー - tokenizer: kuromoji_tokenizer - filter: kuromoji_stemmer + body: + text: サーバー + tokenizer: kuromoji_tokenizer + filter: [kuromoji_stemmer] - length: { tokens: 1 } - match: { tokens.0.token: サーバ } diff --git
a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 deleted file mode 100644 index 359173e0084..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d2a6b8679563d9f044eb1cee580282b20d8e149 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..27a5a67a55a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ccd0636f0df42146b5c77cac5ec57739c9ff2893 \ No newline at end of file diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index 75da19c0a3c..ff4ab4943e3 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -45,9 +45,6 @@ import org.elasticsearch.index.analysis.phonetic.HaasePhonetik; import org.elasticsearch.index.analysis.phonetic.KoelnerPhonetik; import org.elasticsearch.index.analysis.phonetic.Nysiis; -/** - * - */ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private final Encoder encoder; diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/PhoneticClientYamlTestSuiteIT.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/PhoneticClientYamlTestSuiteIT.java index 975b84f1574..447eb1d6cd7 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/PhoneticClientYamlTestSuiteIT.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/PhoneticClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class PhoneticClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java index b0c23e29abd..127a258f75a 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java @@ -31,8 +31,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.instanceOf; -/** - */ public class SimplePhoneticAnalysisTests extends ESTestCase { public void testPhoneticTokenFilterFactory() throws IOException { String yaml = "/org/elasticsearch/index/analysis/phonetic-1.yml"; diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml index 02d4b315b6e..1f326fe3776 100644 ---
a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yaml @@ -22,8 +22,9 @@ - do: indices.analyze: index: phonetic_sample - analyzer: my_analyzer - text: Joe Bloggs + body: + analyzer: my_analyzer + text: Joe Bloggs - length: { tokens: 4 } - match: { tokens.0.token: J } diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml index 675847e557e..5af9f48aa80 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yaml @@ -22,8 +22,9 @@ - do: indices.analyze: index: phonetic_sample - analyzer: my_analyzer - text: supercalifragilisticexpialidocious + body: + analyzer: my_analyzer + text: supercalifragilisticexpialidocious - length: { tokens: 1 } - match: { tokens.0.token: SPRKLF } diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml index 015610af172..259b0adea74 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yaml @@ -24,8 +24,9 @@ - do: indices.analyze: index: phonetic_sample - analyzer: my_analyzer - text: Szwarc + body: + analyzer: my_analyzer + text: Szwarc - length: { tokens: 1 } - match: { tokens.0.token: Svarts } diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml index 5125ae3d684..c67b6892bc9 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yaml @@ -21,8 +21,9 @@ - do: indices.analyze: index: phonetic_sample - analyzer: my_analyzer - text: Moskowitz + body: + analyzer: my_analyzer + text: Moskowitz - length: { tokens: 1 } - match: { tokens.0.token: "645740" } diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 deleted file mode 100644 index 66e339bfa2f..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba3fd99d1cf47d31b82817accdb199fc7a8d838d \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..a70cf1ae74f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +17b3d2f5ffd58756b6d5bdc651eb2ea461885d0a \ No newline at end of file diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java 
b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java index 591912b8fa3..3d619c2d306 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java @@ -24,8 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -/** - */ public class SmartChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<SmartChineseAnalyzer> { private final SmartChineseAnalyzer analyzer; diff --git a/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SmartCNClientYamlTestSuiteIT.java b/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SmartCNClientYamlTestSuiteIT.java index 6415dc436eb..534af79a199 100644 --- a/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SmartCNClientYamlTestSuiteIT.java +++ b/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SmartCNClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class SmartCNClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml b/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml index 2549f774f81..0f1b2805c93 100644 --- a/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml +++ b/plugins/analysis-smartcn/src/test/resources/rest-api-spec/test/analysis_smartcn/10_basic.yaml @@ -3,8 +3,9 @@ "Tokenizer": - do: indices.analyze: - text: 我购买了道具和服装。 - tokenizer: smartcn_tokenizer + body: + text: 我购买了道具和服装。 + tokenizer: smartcn_tokenizer - length: { tokens: 7 } - match: { tokens.0.token: 我 } - match: { tokens.1.token: 购买 } @@ -17,8 +18,9 @@ "Analyzer": - do: indices.analyze: - text: 我购买了道具和服装。 - analyzer: smartcn + body: + text: 我购买了道具和服装。 + analyzer: smartcn - length: { tokens: 6 } - match: { tokens.0.token: 我 } - match: { tokens.1.token: 购买 } diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 deleted file mode 100644 index 5cfb071f3a3..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09d2a759a765f73e2e7becbc560411469c464cfa \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..466578a5e24 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +d3d540a7225837e25cc0ed02aefb0c7763e0f832 \ No newline at end of file diff --git a/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java b/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java index d80939cea04..fa5f82ccd30 100644 ---
a/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java +++ b/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java @@ -25,8 +25,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; -/** - */ public class PolishAnalyzerProvider extends AbstractIndexAnalyzerProvider<PolishAnalyzer> { private final PolishAnalyzer analyzer; diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java index d0b81f01d01..2e606321f60 100644 --- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java +++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java @@ -32,8 +32,6 @@ import java.io.IOException; import static org.hamcrest.Matchers.instanceOf; -/** - */ public class PolishAnalysisTests extends ESTestCase { public void testDefaultsPolishAnalysis() throws IOException { final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/StempelClientYamlTestSuiteIT.java b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/StempelClientYamlTestSuiteIT.java index 34d264122ef..56edcdb692c 100644 --- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/StempelClientYamlTestSuiteIT.java +++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/StempelClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class StempelClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml index f87f00b7922..1941126c64f 100644 --- a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml +++ b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yaml @@ -3,16 +3,18 @@ "Stemmer": - do: indices.analyze: - text: studenci - tokenizer: keyword - filter: polish_stem + body: + text: studenci + tokenizer: keyword + filter: [polish_stem] - length: { tokens: 1 } - match: { tokens.0.token: student } --- "Analyzer": - do: indices.analyze: - text: studenta był - analyzer: polish + body: + text: studenta był + analyzer: polish - length: { tokens: 1 } - match: { tokens.0.token: student } diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle new file mode 100644 index 00000000000..b3c5473a2ff --- /dev/null +++ b/plugins/analysis-ukrainian/build.gradle @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +esplugin { + description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.' + classname 'org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' +} + +dependencies { + compile "org.apache.lucene:lucene-analyzers-morfologik:${versions.lucene}" + compile "org.carrot2:morfologik-stemming:2.1.0" + compile "org.carrot2:morfologik-fsa:2.1.0" +} + +dependencyLicenses { + mapping from: /lucene-.*/, to: 'lucene' + mapping from: /morfologik-.*/, to: 'lucene' +} + +thirdPartyAudit.excludes = [ + // we don't use the morfologik-fsa polish stemmer + 'morfologik.stemming.polish.PolishStemmer' +] diff --git a/plugins/analysis-ukrainian/licenses/lucene-LICENSE.txt b/plugins/analysis-ukrainian/licenses/lucene-LICENSE.txt new file mode 100644 index 00000000000..28b134f5f8e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-LICENSE.txt @@ -0,0 +1,475 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from unicode conversion examples available at +http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright +from those sources: + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. 
hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + + +Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was +derived from Python 2.4.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/2.4.2/license/ + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from Python 3.1.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/3.1.2/license/ + +Some code in core/src/java/org/apache/lucene/util/automaton was +derived from Brics automaton sources available at +www.brics.dk/automaton/. Here is the copyright from those sources: + +/* + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton +were automatically generated with the moman/finenight FSA package. +Here is the copyright for those sources: + +# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from ICU (http://www.icu-project.org) +The full license is available here: + http://source.icu-project.org/repos/icu/icu/trunk/license.html + +/* + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +The following license applies to the Snowball stemmers: + +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2002, Richard Boulton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The following license applies to the KStemmer: + +Copyright © 2003, +Center for Intelligent Information Retrieval, +University of Massachusetts, Amherst. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. The names "Center for Intelligent Information Retrieval" and +"University of Massachusetts" must not be used to endorse or promote products +derived from this software without prior written permission. To obtain +permission, contact info@ciir.cs.umass.edu. + +THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The following license applies to the Morfologik project: + +Copyright (c) 2006 Dawid Weiss +Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Morfologik nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +The dictionary comes from Morfologik project. Morfologik uses data from +Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and +is licenced on the terms of (inter alia) LGPL and Creative Commons +ShareAlike. The part-of-speech tags were added in Morfologik project and +are not found in the data from sjp.pl. The tagset is similar to IPI PAN +tagset. + +--- + +The following license applies to the Morfeusz project, +used by org.apache.lucene.analysis.morfologik. + +BSD-licensed dictionary of Polish (SGJP) +http://sgjp.pl/morfeusz/ + +Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, + Marcin Woliński, Robert Wołosz + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/analysis-ukrainian/licenses/lucene-NOTICE.txt b/plugins/analysis-ukrainian/licenses/lucene-NOTICE.txt new file mode 100644 index 00000000000..ecf08201a5e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-NOTICE.txt @@ -0,0 +1,191 @@ +Apache Lucene +Copyright 2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Includes software from other Apache Software Foundation projects, +including, but not limited to: + - Apache Ant + - Apache Jakarta Regexp + - Apache Commons + - Apache Xerces + +ICU4J, (under analysis/icu) is licensed under an MIT styles license +and Copyright (c) 1995-2008 International Business Machines Corporation and others + +Some data files (under analysis/icu/src/data) are derived from Unicode data such +as the Unicode Character Database. See http://unicode.org/copyright.html for more +details. + +Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is +BSD-licensed, created by Anders Møller. 
See http://www.brics.dk/automaton/ + +The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were +automatically generated with the moman/finenight FSA library, created by +Jean-Philippe Barrette-LaPierre. This library is available under an MIT license, +see http://sites.google.com/site/rrettesite/moman and +http://bitbucket.org/jpbarrette/moman/overview/ + +The class org.apache.lucene.util.WeakIdentityMap was derived from +the Apache CXF project and is Apache License 2.0. + +The Google Code Prettify is Apache License 2.0. +See http://code.google.com/p/google-code-prettify/ + +JUnit (junit-4.10) is licensed under the Common Public License v. 1.0 +See http://junit.sourceforge.net/cpl-v10.html + +This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin +g Package (jaspell): http://jaspell.sourceforge.net/ +License: The BSD License (http://www.opensource.org/licenses/bsd-license.php) + +The snowball stemmers in + analysis/common/src/java/net/sf/snowball +were developed by Martin Porter and Richard Boulton. +The snowball stopword lists in + analysis/common/src/resources/org/apache/lucene/analysis/snowball +were developed by Martin Porter and Richard Boulton. +The full snowball package is available from + http://snowball.tartarus.org/ + +The KStem stemmer in + analysis/common/src/org/apache/lucene/analysis/en +was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst) +under the BSD-license. + +The Arabic,Persian,Romanian,Bulgarian, and Hindi analyzers (common) come with a default +stopword list that is BSD-licensed created by Jacques Savoy. These files reside in: +analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt +See http://members.unine.ch/jacques.savoy/clef/index.html. + +The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers +(common) are based on BSD-licensed reference implementations created by Jacques Savoy and +Ljiljana Dolamic. These files reside in: +analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java + +The Stempel analyzer (stempel) includes BSD-licensed software developed +by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil, +and Edmond Nolan. + +The Polish analyzer (stempel) comes with a default +stopword list that is BSD-licensed created by the Carrot2 project. 
The file resides +in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt. +See http://project.carrot2.org/license.html. + +The SmartChineseAnalyzer source code (smartcn) was +provided by Xiaoping Gao and copyright 2009 by www.imdict.net. + +WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/) +is derived from Unicode data such as the Unicode Character Database. +See http://unicode.org/copyright.html for more details. + +The Morfologik analyzer (morfologik) includes BSD-licensed software +developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/). + +Morfologik uses data from Polish ispell/myspell dictionary +(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia) +LGPL and Creative Commons ShareAlike. + +Morfologic includes data from BSD-licensed dictionary of Polish (SGJP) +(http://sgjp.pl/morfeusz/) + +Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original +source code for this can be found at http://www.eclipse.org/jetty/downloads.php + +=========================================================================== +Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration +=========================================================================== + +This software includes a binary and/or source version of data from + + mecab-ipadic-2.7.0-20070801 + +which can be obtained from + + http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz + +or + + http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz + +=========================================================================== +mecab-ipadic-2.7.0-20070801 Notice +=========================================================================== + +Nara Institute of Science and Technology (NAIST), +the copyright holders, disclaims all warranties with regard to this +software, including all implied warranties of merchantability and +fitness, in no event shall NAIST be liable for +any special, indirect or consequential damages or any damages +whatsoever resulting from loss of use, data or profits, whether in an +action of contract, negligence or other tortuous action, arising out +of or in connection with the use or performance of this software. + +A large portion of the dictionary entries +originate from ICOT Free Software. The following conditions for ICOT +Free Software applies to the current dictionary as well. + +Each User may also freely distribute the Program, whether in its +original form or modified, to any third party or parties, PROVIDED +that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear +on, or be attached to, the Program, which is distributed substantially +in the same form as set out herein and that such intended +distribution, if actually made, will neither violate or otherwise +contravene any of the laws and regulations of the countries having +jurisdiction over the User or the intended distribution itself. + +NO WARRANTY + +The program was produced on an experimental basis in the course of the +research and development conducted during the project and is provided +to users as so produced on an experimental basis. Accordingly, the +program is provided without any warranty whatsoever, whether express, +implied, statutory or otherwise. 
The term "warranty" used herein +includes, but is not limited to, any warranty of the quality, +performance, merchantability and fitness for a particular purpose of +the program and the nonexistence of any infringement or violation of +any right of any third party. + +Each user of the program will agree and understand, and be deemed to +have agreed and understood, that there is no warranty whatsoever for +the program and, accordingly, the entire risk arising from or +otherwise connected with the program is assumed by the user. + +Therefore, neither ICOT, the copyright holder, or any other +organization that participated in or was otherwise related to the +development of the program and their respective officials, directors, +officers and other employees shall be held liable for any and all +damages, including, without limitation, general, special, incidental +and consequential damages, arising out of or otherwise in connection +with the use or inability to use the program or any product, material +or result produced or otherwise obtained by using the program, +regardless of whether they have been advised of, or otherwise had +knowledge of, the possibility of such damages at any time during the +project or thereafter. Each user will be deemed to have agreed to the +foregoing by his or her commencement of use of the program. The term +"use" as used herein includes, but is not limited to, the use, +modification, copying and distribution of the program and the +production of secondary products from the program. + +In the case where the program, whether in its original form or +modified, was distributed or delivered to or received by a user from +any person, organization or entity other than ICOT, unless it makes or +grants independently of ICOT any specific warranty to the user in +writing, such person, organization or entity, will also be exempted +from and not be held liable to the user for any such damages as noted +above as far as the program is concerned. 
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..5ad5644d679 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +7e711a007cd1588f8118eb02803381d448ae087c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.0.jar.sha1 new file mode 100644 index 00000000000..88f43752dba --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.0.jar.sha1 @@ -0,0 +1 @@ +88e5993f73c102f378c711f6e47221b7a9e22d25 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.0.jar.sha1 new file mode 100644 index 00000000000..ec449346c7b --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.0.jar.sha1 @@ -0,0 +1 @@ +94167b64752138a246cc33cbf1a3b0bfe5274b7c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/index/analysis/UkrainianAnalyzerProvider.java b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/index/analysis/UkrainianAnalyzerProvider.java new file mode 100644 index 00000000000..45bf27b954b --- /dev/null +++ b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/index/analysis/UkrainianAnalyzerProvider.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; + +public class UkrainianAnalyzerProvider extends AbstractIndexAnalyzerProvider<UkrainianMorfologikAnalyzer> { + + private final UkrainianMorfologikAnalyzer analyzer; + + public UkrainianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + analyzer = new UkrainianMorfologikAnalyzer(Analysis.parseStopWords(env, settings, UkrainianMorfologikAnalyzer.getDefaultStopSet()), + Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)); + analyzer.setVersion(version); + } + + @Override + public UkrainianMorfologikAnalyzer get() { + return this.analyzer; + } + + +} diff --git a/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/plugin/analysis/ukrainian/AnalysisUkrainianPlugin.java b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/plugin/analysis/ukrainian/AnalysisUkrainianPlugin.java new file mode 100644 index 00000000000..ff8425e201c --- /dev/null +++ b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/plugin/analysis/ukrainian/AnalysisUkrainianPlugin.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugin.analysis.ukrainian; + +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.index.analysis.AnalyzerProvider; +import org.elasticsearch.index.analysis.UkrainianAnalyzerProvider; +import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; +import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.Plugin; + +import java.util.Map; + +import static java.util.Collections.singletonMap; + +public class AnalysisUkrainianPlugin extends Plugin implements AnalysisPlugin { + + @Override + public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() { + return singletonMap("ukrainian", UkrainianAnalyzerProvider::new); + } +} diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/SimpleUkrainianAnalyzerTests.java b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/SimpleUkrainianAnalyzerTests.java new file mode 100644 index 00000000000..6dbc37ea4ab --- /dev/null +++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/SimpleUkrainianAnalyzerTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class SimpleUkrainianAnalyzerTests extends ESTestCase { + + public void testBasicUsage() throws Exception { + testAnalyzer("чергу", "черга"); + testAnalyzer("рухається", "рухатися"); + testAnalyzer("колу", "кола", "коло", "кіл"); + testAnalyzer("Ця п'єса у свою чергу рухається по колу.", "п'єса", "черга", "рухатися", "кола", "коло", "кіл"); + } + + private static void testAnalyzer(String source, String... expected_terms) throws IOException { + TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisUkrainianPlugin()); + Analyzer analyzer = analysis.indexAnalyzers.get("ukrainian").analyzer(); + TokenStream ts = analyzer.tokenStream("test", source); + CharTermAttribute term1 = ts.addAttribute(CharTermAttribute.class); + ts.reset(); + for (String expected : expected_terms) { + assertThat(ts.incrementToken(), equalTo(true)); + assertThat(term1.toString(), equalTo(expected)); + } + assertThat(ts.incrementToken(), equalTo(false)); + } + +} diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java new file mode 100644 index 00000000000..a45549c22bd --- /dev/null +++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; + +import static org.hamcrest.Matchers.instanceOf; + +public class UkrainianAnalysisTests extends ESTestCase { + + public void testDefaultsUkrainianAnalysis() throws IOException { + final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, + new AnalysisUkrainianPlugin()); + + Analyzer analyzer = analysis.indexAnalyzers.get("ukrainian").analyzer(); + MatcherAssert.assertThat(analyzer, instanceOf(UkrainianMorfologikAnalyzer.class)); + } +} diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianClientYamlTestSuiteIT.java b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianClientYamlTestSuiteIT.java new file mode 100644 index 00000000000..dd77fdf74a5 --- /dev/null +++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianClientYamlTestSuiteIT.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.index.analysis; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException; + +import java.io.IOException; + +public class UkrainianClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public UkrainianClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { + return ESClientYamlSuiteTestCase.createParameters(); + } +} + diff --git a/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/10_basic.yaml b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/10_basic.yaml new file mode 100644 index 00000000000..48d513c140c --- /dev/null +++ b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/10_basic.yaml @@ -0,0 +1,18 @@ +# Integration tests for Ukrainian analyzer +--- +"Analyzer": + - do: + indices.analyze: + body: + text: колу + analyzer: ukrainian + - length: { tokens: 3 } + - match: { tokens.0.token: кола } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 4 } + - match: { tokens.1.token: коло } + - match: { tokens.1.start_offset: 0 } + - match: { tokens.1.end_offset: 4 } + - match: { tokens.2.token: кіл } + - match: { tokens.2.start_offset: 0 } + - match: { tokens.2.end_offset: 4 } diff --git a/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yaml b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yaml new file mode 100644 index 00000000000..34d8fd2fde7 --- /dev/null +++ b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yaml @@ -0,0 +1,32 @@ +# Integration tests for Ukrainian analysis component +# +--- +"Index Ukrainian content": + - do: + indices.create: + index: test + body: + mappings: + type: + properties: + text: + type: text + analyzer: ukrainian + + - do: + index: + index: test + type: type + id: 1 + body: { "text": "Ця п'єса у свою чергу рухається по колу." } + - do: + indices.refresh: {} + + - do: + search: + index: test + body: + query: + match: + text: кола + - match: { hits.total: 1 } diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java deleted file mode 100644 index 7ee62dd8776..00000000000 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/AzureDiscoveryModule.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cloud.azure.classic; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; -import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; - -/** - * Azure Module - * - *
- * <ul> - * <li>If needed this module will bind azure discovery service by default - * to AzureComputeServiceImpl.</li> - * </ul>
- * - * @see AzureComputeServiceImpl - */ -public class AzureDiscoveryModule extends AbstractModule { - protected final Logger logger; - private Settings settings; - - // pkg private so it is settable by tests - Class<? extends AzureComputeService> computeServiceImpl = AzureComputeServiceImpl.class; - - @Inject - public AzureDiscoveryModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - } - - @Override - protected void configure() { - logger.debug("starting azure services"); - // If we have set discovery to azure, let's start the azure compute service - if (isDiscoveryReady(settings, logger)) { - logger.debug("starting azure discovery service"); - bind(AzureComputeService.class).to(computeServiceImpl).asEagerSingleton(); - } - } - - /** - * Check if discovery is meant to start - * @return true if we can start discovery features - */ - public static boolean isDiscoveryReady(Settings settings, Logger logger) { - // User set discovery.type: azure - if (!AzureDiscoveryPlugin.AZURE.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) { - logger.trace("discovery.type not set to {}", AzureDiscoveryPlugin.AZURE); - return false; - } - - if (isDefined(settings, AzureComputeService.Management.SUBSCRIPTION_ID_SETTING) && - isDefined(settings, AzureComputeService.Management.SERVICE_NAME_SETTING) && - isDefined(settings, AzureComputeService.Management.KEYSTORE_PATH_SETTING) && - isDefined(settings, AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING)) { - logger.trace("All required properties for Azure discovery are set!"); - return true; - } else { - logger.debug("One or more Azure discovery settings are missing. " + - "Check elasticsearch.yml file. Should have [{}], [{}], [{}] and [{}].", - AzureComputeService.Management.SUBSCRIPTION_ID_SETTING.getKey(), - AzureComputeService.Management.SERVICE_NAME_SETTING.getKey(), - AzureComputeService.Management.KEYSTORE_PATH_SETTING.getKey(), - AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING.getKey()); - return false; - } - } - - private static boolean isDefined(Settings settings, Setting<String> property) throws ElasticsearchException { - return (property.exists(settings) && Strings.hasText(property.get(settings))); - } -} diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java index 2375db2502b..09519b14499 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java @@ -19,6 +19,9 @@ package org.elasticsearch.cloud.azure.classic.management; +import java.io.IOException; +import java.util.ServiceLoader; + import com.microsoft.windowsazure.Configuration; import com.microsoft.windowsazure.core.Builder; import com.microsoft.windowsazure.core.DefaultBuilder; @@ -29,30 +32,24 @@ import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDeta import com.microsoft.windowsazure.management.configuration.ManagementConfiguration; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import
org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import java.io.IOException; -import java.util.ServiceLoader; - -/** - * - */ public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService { private final ComputeManagementClient client; private final String serviceName; - @Inject public AzureComputeServiceImpl(Settings settings) { super(settings); - String subscriptionId = Management.SUBSCRIPTION_ID_SETTING.get(settings); + String subscriptionId = getRequiredSetting(settings, Management.SUBSCRIPTION_ID_SETTING); - serviceName = Management.SERVICE_NAME_SETTING.get(settings); - String keystorePath = Management.KEYSTORE_PATH_SETTING.get(settings); - String keystorePassword = Management.KEYSTORE_PASSWORD_SETTING.get(settings); + serviceName = getRequiredSetting(settings, Management.SERVICE_NAME_SETTING); + String keystorePath = getRequiredSetting(settings, Management.KEYSTORE_PATH_SETTING); + String keystorePassword = getRequiredSetting(settings, Management.KEYSTORE_PASSWORD_SETTING); KeyStoreType keystoreType = Management.KEYSTORE_TYPE_SETTING.get(settings); logger.trace("creating new Azure client for [{}], [{}]", subscriptionId, serviceName); @@ -82,6 +79,14 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent } } + private static String getRequiredSetting(Settings settings, Setting<String> setting) { + String value = setting.get(settings); + if (value == null || Strings.hasLength(value) == false) { + throw new IllegalArgumentException("Missing required setting " + setting.getKey() + " for azure"); + } + return value; + } + @Override public HostedServiceGetDetailedResponse getServiceDetails() { try { diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index ed327a3a727..1ef7653914b 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -39,7 +39,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -51,9 +51,6 @@ import java.util.List; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -/** - * - */ public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { public enum HostType { @@ -110,10 +107,8 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic private final String deploymentName; private final DeploymentSlot deploymentSlot; - @Inject public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService, - TransportService transportService, - NetworkService networkService) { + TransportService transportService, NetworkService networkService) { super(settings); this.azureComputeService = azureComputeService; this.transportService =
transportService; diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index db5c1cc5c42..1c27a9da0af 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -19,47 +19,65 @@ package org.elasticsearch.plugin.discovery.azure.classic; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cloud.azure.classic.AzureDiscoveryModule; -import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.azure.classic.AzureUnicastHostsProvider; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.plugins.Plugin; - import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.function.Supplier; -public class AzureDiscoveryPlugin extends Plugin { +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.discovery.azure.classic.AzureUnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { public static final String AZURE = "azure"; - private final Settings settings; - protected final Logger logger = Loggers.getLogger(AzureDiscoveryPlugin.class); + protected final Settings settings; + private static final Logger logger = Loggers.getLogger(AzureDiscoveryPlugin.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public AzureDiscoveryPlugin(Settings settings) { this.settings = settings; - DeprecationLogger deprecationLogger = new DeprecationLogger(logger); deprecationLogger.deprecated("azure classic discovery plugin is deprecated. 
Use azure arm discovery plugin instead"); logger.trace("starting azure classic discovery plugin..."); } - @Override - public Collection<Module> createGuiceModules() { - return Collections.singletonList((Module) new AzureDiscoveryModule(settings)); + // overrideable for tests + protected AzureComputeService createComputeService() { + return new AzureComputeServiceImpl(settings); } - public void onModule(DiscoveryModule discoveryModule) { - if (AzureDiscoveryModule.isDiscoveryReady(settings, logger)) { - discoveryModule.addDiscoveryType(AZURE, ZenDiscovery.class); - discoveryModule.addUnicastHostProvider(AZURE, AzureUnicastHostsProvider.class); - } + @Override + public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService, + NetworkService networkService) { + return Collections.singletonMap(AZURE, + () -> new AzureUnicastHostsProvider(settings, createComputeService(), transportService, networkService)); + } + + @Override + public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ZenPing zenPing) { + // this is for backcompat with pre 5.1, where users would set discovery.type to use the azure hosts provider + return Collections.singletonMap(AZURE, () -> + new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing)); } @Override @@ -76,4 +94,19 @@ public class AzureDiscoveryPlugin extends Plugin { AzureComputeService.Discovery.ENDPOINT_NAME_SETTING); } + @Override + public Settings additionalSettings() { + // For 5.0, the hosts provider was "zen", but this was before the discovery.zen.hosts_provider + // setting existed. This check looks for the legacy setting, and sets hosts provider if set + String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings); + if (discoveryType.equals(AZURE)) { + deprecationLogger.deprecated("Using " + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + " setting to set hosts provider is deprecated. 
" + + "Set \"" + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey() + ": " + AZURE + "\" instead"); + if (DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.exists(settings) == false) { + return Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), AZURE).build(); + } + } + return Settings.EMPTY; + } } diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java index d7c3b920780..c9496b1ead4 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java @@ -57,7 +57,7 @@ public abstract class AbstractAzureComputeServiceTestCase extends ESIntegTestCas @Override protected Collection> nodePlugins() { - return Arrays.asList(AzureDiscoveryPlugin.class, mockPlugin); + return Arrays.asList(mockPlugin); } protected void checkNumberOfNodes(int expected) { diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java index 66e853b5953..2fbb4d7c4d7 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceSimpleMock.java @@ -19,32 +19,35 @@ package org.elasticsearch.cloud.azure.classic; +import java.net.InetAddress; + import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceAbstractMock; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.plugins.Plugin; - -import java.net.InetAddress; +import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; /** * Mock Azure API with a single started node */ public class AzureComputeServiceSimpleMock extends AzureComputeServiceAbstractMock { - public static class TestPlugin extends Plugin { - public void onModule(AzureDiscoveryModule azureDiscoveryModule) { - azureDiscoveryModule.computeServiceImpl = AzureComputeServiceSimpleMock.class; + public static class TestPlugin extends AzureDiscoveryPlugin { + public TestPlugin(Settings settings) { + super(settings); + } + @Override + protected AzureComputeService createComputeService() { + return new AzureComputeServiceSimpleMock(settings); } } - @Inject - public AzureComputeServiceSimpleMock(Settings settings) { + private AzureComputeServiceSimpleMock(Settings settings) { super(settings); } diff --git 
a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java index d75ce22d55c..5b787e1ce44 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AzureComputeServiceTwoNodesMock.java @@ -19,20 +19,19 @@ package org.elasticsearch.cloud.azure.classic; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; + import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; +import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceAbstractMock; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; - -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Arrays; +import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; import static org.elasticsearch.common.util.CollectionUtils.newSingletonArrayList; @@ -41,18 +40,19 @@ import static org.elasticsearch.common.util.CollectionUtils.newSingletonArrayLis * Mock Azure API with two started nodes */ public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstractMock { - public static class TestPlugin extends Plugin { - public void onModule(AzureDiscoveryModule azureDiscoveryModule) { - azureDiscoveryModule.computeServiceImpl = AzureComputeServiceTwoNodesMock.class; + + public static class TestPlugin extends AzureDiscoveryPlugin { + public TestPlugin(Settings settings) { + super(settings); + } + @Override + protected AzureComputeService createComputeService() { + return new AzureComputeServiceTwoNodesMock(settings); } } - NetworkService networkService; - - @Inject - protected AzureComputeServiceTwoNodesMock(Settings settings, NetworkService networkService) { + private AzureComputeServiceTwoNodesMock(Settings settings) { super(settings); - this.networkService = networkService; } @Override diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java index 02de9db6d69..de63d0d77e6 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceAbstractMock.java @@ -23,9 +23,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; -/** - * - */ public abstract class AzureComputeServiceAbstractMock extends AbstractLifecycleComponent 
implements AzureComputeService { diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index b3f1afb35a1..9e17ca21868 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -66,7 +66,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -@ESIntegTestCase.SuppressLocalMode @ESIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0) @SuppressForbidden(reason = "use http server") // TODO this should be a IT but currently all ITs in this project run against a real cluster diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java index 30276c16c89..33c5d41f70c 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class DiscoveryAzureClassicClientYamlTestSuiteIT extends ESClientYamlSuit @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index 1a4bf278f3a..07f1c4f3c19 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -19,6 +19,10 @@ package org.elasticsearch.cloud.aws; +import java.io.Closeable; +import java.io.IOException; +import java.util.Random; + import com.amazonaws.AmazonClientException; import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.ClientConfiguration; @@ -31,25 +35,17 @@ import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; -import java.util.Random; - -/** - * - */ -public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements AwsEc2Service { +public class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Closeable { public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; private AmazonEC2Client client; - @Inject 
public AwsEc2ServiceImpl(Settings settings) { super(settings); } @@ -198,15 +194,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent implements Aws } @Override - protected void doStart() throws ElasticsearchException { - } - - @Override - protected void doStop() throws ElasticsearchException { - } - - @Override - protected void doClose() throws ElasticsearchException { + public void close() throws IOException { if (client != null) { client.shutdown(); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 6d4fcdc4c8d..c3b7731692b 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -19,6 +19,12 @@ package org.elasticsearch.discovery.ec2; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import com.amazonaws.AmazonClientException; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.DescribeInstancesRequest; @@ -34,12 +40,11 @@ import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2Service.DISCOVERY_EC2; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; @@ -52,9 +57,6 @@ import static java.util.Collections.disjoint; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -/** - * - */ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { private final TransportService transportService; @@ -73,7 +75,6 @@ public class AwsEc2UnicastHostsProvider extends AbstractComponent implements Uni private final DiscoNodesCache discoNodes; - @Inject public AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) { super(settings); this.transportService = transportService; diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java index 7f8e983e52b..09ab7569f3d 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java @@ -19,26 +19,9 @@ package org.elasticsearch.plugin.discovery.ec2; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.SpecialPermission; -import org.elasticsearch.cloud.aws.AwsEc2Service; -import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; -import org.elasticsearch.cloud.aws.Ec2Module; -import org.elasticsearch.cloud.aws.network.Ec2NameResolver; -import org.elasticsearch.common.component.LifecycleComponent; 
-import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.Plugin; - +import com.amazonaws.util.json.Jackson; import java.io.BufferedReader; +import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -48,22 +31,44 @@ import java.net.URLConnection; import java.nio.charset.StandardCharsets; import java.security.AccessController; import java.security.PrivilegedAction; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.function.Supplier; -/** - * - */ -public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.cloud.aws.AwsEc2Service; +import org.elasticsearch.cloud.aws.AwsEc2ServiceImpl; +import org.elasticsearch.cloud.aws.network.Ec2NameResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable { private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public static final String EC2 = "ec2"; - // ClientConfiguration clinit has some classloader problems - // TODO: fix that static { SecurityManager sm = System.getSecurityManager(); if (sm != null) { @@ -73,6 +78,10 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { @Override public Void run() { try { + // kick jackson to do some static caching of declared members info + Jackson.jsonNodeOf("{}"); + // ClientConfiguration clinit has some classloader problems + // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); } catch (ClassNotFoundException e) { throw new RuntimeException(e); @@ -83,29 +92,19 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { } private Settings settings; + // stashed when created in order to properly close + private final SetOnce<AwsEc2ServiceImpl> ec2Service = new 
SetOnce<>(); public Ec2DiscoveryPlugin(Settings settings) { this.settings = settings; } @Override - public Collection createGuiceModules() { - Collection modules = new ArrayList<>(); - modules.add(new Ec2Module()); - return modules; - } - - @Override - @SuppressWarnings("rawtypes") // Supertype uses rawtype - public Collection> getGuiceServiceClasses() { - Collection> services = new ArrayList<>(); - services.add(AwsEc2ServiceImpl.class); - return services; - } - - public void onModule(DiscoveryModule discoveryModule) { - discoveryModule.addDiscoveryType(EC2, ZenDiscovery.class); - discoveryModule.addUnicastHostProvider(EC2, AwsEc2UnicastHostsProvider.class); + public Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ZenPing zenPing) { + // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider + return Collections.singletonMap(EC2, () -> + new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing)); } @Override @@ -114,6 +113,15 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { return new Ec2NameResolver(settings); } + @Override + public Map> getZenHostsProviders(TransportService transportService, + NetworkService networkService) { + return Collections.singletonMap(EC2, () -> { + ec2Service.set(new AwsEc2ServiceImpl(settings)); + return new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service.get()); + }); + } + @Override public List> getSettings() { return Arrays.asList( @@ -149,10 +157,25 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { AwsEc2Service.AUTO_ATTRIBUTE_SETTING); } - /** Adds a node attribute for the ec2 availability zone. */ @Override public Settings additionalSettings() { - return getAvailabilityZoneNodeAttributes(settings, AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"); + Settings.Builder builder = Settings.builder(); + // For 5.0, discovery.type was used prior to the new discovery.zen.hosts_provider + // setting existed. This check looks for the legacy setting, and sets hosts provider if set + String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings); + if (discoveryType.equals(EC2)) { + deprecationLogger.deprecated("Using " + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + " setting to set hosts provider is deprecated. 
" + + "Set \"" + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey() + ": " + EC2 + "\" instead"); + if (DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.exists(settings) == false) { + builder.put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), EC2).build(); + } + } + + // Adds a node attribute for the ec2 availability zone + String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; + builder.put(getAvailabilityZoneNodeAttributes(settings, azMetadataUrl)); + return builder.build(); } // pkg private for testing @@ -190,4 +213,9 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin { return attrs.build(); } + + @Override + public void close() throws IOException { + IOUtils.close(ec2Service.get()); + } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/CloudAwsClientYamlTestSuiteIT.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/CloudAwsClientYamlTestSuiteIT.java index f5f49c14833..3cd30c187d4 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/CloudAwsClientYamlTestSuiteIT.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/CloudAwsClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class CloudAwsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java index bd69953eb1e..693e765ac2d 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java @@ -57,7 +57,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.hamcrest.Matchers.equalTo; -@ESIntegTestCase.SuppressLocalMode @ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) @SuppressForbidden(reason = "use http server") // TODO this should be a IT but currently all ITs in this project run against a real cluster diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index edb062d9e30..055d9df8465 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -23,34 +23,40 @@ import com.amazonaws.services.ec2.model.Tag; import org.elasticsearch.Version; import org.elasticsearch.cloud.aws.AwsEc2Service; import org.elasticsearch.cloud.aws.AwsEc2Service.DISCOVERY_EC2; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.CopyOnWriteHashMap; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTcpTransport; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -/** - * - */ public class Ec2DiscoveryTests extends ESTestCase { protected static ThreadPool threadPool; protected MockTransportService transportService; + private Map<String, TransportAddress> poorMansDNS = new ConcurrentHashMap<>(); @BeforeClass public static void createThreadPool() { @@ -67,7 +73,18 @@ public class Ec2DiscoveryTests extends ESTestCase { @Before public void createTransportService() { - transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + final Transport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Settings.EMPTY, Collections.emptyList()), + Version.CURRENT) { + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception { + // we just need to ensure we don't resolve DNS here + return new TransportAddress[] {poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress())}; + } + }; + transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); } protected List<DiscoveryNode> buildDynamicNodes(Settings nodeSettings, int nodes) { @@ -92,6 +109,9 @@ public class Ec2DiscoveryTests extends ESTestCase { public void testPrivateIp() throws InterruptedException { int nodes = randomInt(10); + for (int i = 0; i < nodes; i++) { + poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress()); + } Settings nodeSettings = Settings.builder() .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "private_ip") .build(); @@ -101,13 +121,16 @@ public class Ec2DiscoveryTests extends ESTestCase { int node = 1; for (DiscoveryNode discoveryNode : discoveryNodes) { TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = new LocalTransportAddress(AmazonEC2Mock.PREFIX_PRIVATE_IP + node++); - assertThat(address.sameHost(expected), is(true)); + TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PRIVATE_IP + node++); + assertEquals(address, expected); } } public void testPublicIp() throws InterruptedException { int nodes = randomInt(10); + for (int i = 0; i < nodes; i++) { + poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress()); + } Settings nodeSettings = Settings.builder() .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "public_ip") .build(); @@ -117,13 +140,18 @@ public class Ec2DiscoveryTests 
extends ESTestCase { int node = 1; for (DiscoveryNode discoveryNode : discoveryNodes) { TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = new LocalTransportAddress(AmazonEC2Mock.PREFIX_PUBLIC_IP + node++); - assertThat(address.sameHost(expected), is(true)); + TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PUBLIC_IP + node++); + assertEquals(address, expected); } } public void testPrivateDns() throws InterruptedException { int nodes = randomInt(10); + for (int i = 0; i < nodes; i++) { + String instanceId = "node" + (i+1); + poorMansDNS.put(AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + + AmazonEC2Mock.SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); + } Settings nodeSettings = Settings.builder() .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "private_dns") .build(); @@ -134,14 +162,19 @@ public class Ec2DiscoveryTests extends ESTestCase { for (DiscoveryNode discoveryNode : discoveryNodes) { String instanceId = "node" + node++; TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = new LocalTransportAddress( + TransportAddress expected = poorMansDNS.get( AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEC2Mock.SUFFIX_PRIVATE_DNS); - assertThat(address.sameHost(expected), is(true)); + assertEquals(address, expected); } } public void testPublicDns() throws InterruptedException { int nodes = randomInt(10); + for (int i = 0; i < nodes; i++) { + String instanceId = "node" + (i+1); + poorMansDNS.put(AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + + AmazonEC2Mock.SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); + } Settings nodeSettings = Settings.builder() .put(DISCOVERY_EC2.HOST_TYPE_SETTING.getKey(), "public_dns") .build(); @@ -152,9 +185,9 @@ public class Ec2DiscoveryTests extends ESTestCase { for (DiscoveryNode discoveryNode : discoveryNodes) { String instanceId = "node" + node++; TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = new LocalTransportAddress( + TransportAddress expected = poorMansDNS.get( AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEC2Mock.SUFFIX_PUBLIC_DNS); - assertThat(address.sameHost(expected), is(true)); + assertEquals(address, expected); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index f781a3b7fe9..d93725a03c3 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,13 +19,21 @@ package org.elasticsearch.discovery.file; +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.TransportService; /** * Plugin for providing file-based unicast hosts discovery. 
The list of unicast hosts @@ -35,17 +43,33 @@ import org.elasticsearch.plugins.Plugin; public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { private static final Logger logger = Loggers.getLogger(FileBasedDiscoveryPlugin.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); private final Settings settings; public FileBasedDiscoveryPlugin(Settings settings) { this.settings = settings; - logger.trace("starting file-based discovery plugin..."); } - public void onModule(DiscoveryModule discoveryModule) { - logger.trace("registering file-based unicast hosts provider"); - // using zen discovery for the discovery type and we're just adding a unicast host provider for it - discoveryModule.addUnicastHostProvider("zen", FileBasedUnicastHostsProvider.class); + @Override + public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService, + NetworkService networkService) { + return Collections.singletonMap("file", () -> new FileBasedUnicastHostsProvider(settings, transportService)); + } + + @Override + public Settings additionalSettings() { + // For 5.0, the hosts provider was "zen", but this was before the discovery.zen.hosts_provider + // setting existed. This check looks for the legacy zen, and sets the file hosts provider if not set + String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings); + if (discoveryType.equals("zen")) { + deprecationLogger.deprecated("Using " + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + " setting to set hosts provider is deprecated. " + + "Set \"" + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey() + ": file\" instead"); + if (DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.exists(settings) == false) { + return Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "file").build(); + } + } + return Settings.EMPTY; } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java index 78393d34001..d7323d43acc 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.env.Environment; import org.elasticsearch.transport.TransportService; @@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.resolveDiscoveryNodes; +import static org.elasticsearch.discovery.zen.UnicastZenPing.resolveDiscoveryNodes; /** * An implementation of {@link UnicastHostsProvider} that reads hosts/ports @@ -55,7 +55,7 @@ import static org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing.resolv * 67.81.244.11:9305 * 67.81.244.15:9400 */ -public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { +class FileBasedUnicastHostsProvider extends AbstractComponent 
implements UnicastHostsProvider { static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; static final String UNICAST_HOST_PREFIX = "#zen_file_unicast_host_"; @@ -66,8 +66,7 @@ public class FileBasedUnicastHostsProvider extends AbstractComponent implements private final AtomicLong nodeIdGenerator = new AtomicLong(); // generates unique ids for the node - @Inject - public FileBasedUnicastHostsProvider(Settings settings, TransportService transportService) { + FileBasedUnicastHostsProvider(Settings settings, TransportService transportService) { super(settings); this.transportService = transportService; this.unicastHostsFilePath = new Environment(settings).configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java index 45905a152ce..8a0bd808dba 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryClientYamlTestSuiteIT.java @@ -38,6 +38,6 @@ public class FileBasedDiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTe @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java new file mode 100644 index 00000000000..3dc378f6cb0 --- /dev/null +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.discovery.file; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.test.ESTestCase; + +public class FileBasedDiscoveryPluginTests extends ESTestCase { + + public void testHostsProviderBwc() { + FileBasedDiscoveryPlugin plugin = new FileBasedDiscoveryPlugin(Settings.EMPTY); + Settings additionalSettings = plugin.additionalSettings(); + assertEquals("file", additionalSettings.get(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey())); + } + + public void testHostsProviderExplicit() { + Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "foo").build(); + FileBasedDiscoveryPlugin plugin = new FileBasedDiscoveryPlugin(settings); + assertEquals(Settings.EMPTY, plugin.additionalSettings()); + } +} \ No newline at end of file diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java index f38ae218ec0..ffb9726d264 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -74,20 +74,21 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), new NetworkService(Settings.EMPTY, Collections.emptyList())); - transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null); } public void testBuildDynamicNodes() throws Exception { final List<String> hostEntries = Arrays.asList("#comment, should be ignored", "192.168.0.1", "192.168.0.2:9305", "255.255.23.15"); final List<DiscoveryNode> nodes = setupAndRunHostProvider(hostEntries); assertEquals(hostEntries.size() - 1, nodes.size()); // minus 1 because we are ignoring the first line that's a comment - assertEquals("192.168.0.1", nodes.get(0).getAddress().getHost()); + assertEquals("192.168.0.1", nodes.get(0).getAddress().getAddress()); assertEquals(9300, nodes.get(0).getAddress().getPort()); assertEquals(UNICAST_HOST_PREFIX + "1#", nodes.get(0).getId()); - assertEquals("192.168.0.2", nodes.get(1).getAddress().getHost()); + assertEquals("192.168.0.2", nodes.get(1).getAddress().getAddress()); assertEquals(9305, nodes.get(1).getAddress().getPort()); assertEquals(UNICAST_HOST_PREFIX + "2#", nodes.get(1).getId()); - assertEquals("255.255.23.15", nodes.get(2).getAddress().getHost()); + assertEquals("255.255.23.15", nodes.get(2).getAddress().getAddress()); assertEquals(9300, nodes.get(2).getAddress().getPort()); assertEquals(UNICAST_HOST_PREFIX + "3#", nodes.get(2).getId()); } @@ -117,7 +118,7 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); List<DiscoveryNode> nodes = setupAndRunHostProvider(hostEntries); assertEquals(1, nodes.size()); // only one of the two is valid and will be used - assertEquals("192.168.0.1", nodes.get(0).getAddress().getHost()); + assertEquals("192.168.0.1", nodes.get(0).getAddress().getAddress()); assertEquals(9301, nodes.get(0).getAddress().getPort()); }
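The discovery-file changes above capture the shape of the new discovery SPI: instead of registering a hosts provider through Guice in onModule(DiscoveryModule) with an @Inject constructor, a plugin now returns plain factories from DiscoveryPlugin#getZenHostsProviders, and additionalSettings() keeps the deprecated discovery.type route working. As a rough sketch of what a third-party plugin would look like against this SPI (the class names and the "my-hosts" key are hypothetical; the hook signature is the one FileBasedDiscoveryPlugin overrides above):

package org.example.discovery;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportService;

// Hypothetical plugin: hosts providers are returned as factories rather than
// bound through Guice, so no @Inject constructor is needed on the provider.
public class MyDiscoveryPlugin extends Plugin implements DiscoveryPlugin {

    private final Settings settings;

    public MyDiscoveryPlugin(Settings settings) {
        this.settings = settings;
    }

    @Override
    public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService,
                                                                            NetworkService networkService) {
        // users select this provider via "discovery.zen.hosts_provider: my-hosts" in elasticsearch.yml
        return Collections.singletonMap("my-hosts", () -> new MyUnicastHostsProvider(settings, transportService));
    }

    // Hypothetical provider: returns the seed nodes that zen discovery should ping.
    private static class MyUnicastHostsProvider implements UnicastHostsProvider {
        MyUnicastHostsProvider(Settings settings, TransportService transportService) {
        }

        @Override
        public List<DiscoveryNode> buildDynamicNodes() {
            return Collections.emptyList(); // a real provider would resolve its configured hosts here
        }
    }
}

The GCE changes that follow apply the same conversion, with the extra wrinkle that the plugin implements Closeable and stashes the GceInstancesServiceImpl it creates in a SetOnce so the service can be shut down when the node closes.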
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java index 39db86c672a..6f7313051fe 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java @@ -25,12 +25,13 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; +import java.io.Closeable; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.function.Function; -public interface GceInstancesService extends LifecycleComponent { +public interface GceInstancesService { /** * GCE API Version: Elasticsearch/GceCloud/1.0 diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index c6c7b9a0aef..bda68780c79 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -19,27 +19,7 @@ package org.elasticsearch.cloud.gce; -import com.google.api.client.googleapis.compute.ComputeCredential; -import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; -import com.google.api.client.http.HttpTransport; -import com.google.api.client.http.javanet.NetHttpTransport; -import com.google.api.client.json.JsonFactory; -import com.google.api.client.json.jackson2.JacksonFactory; -import com.google.api.services.compute.Compute; -import com.google.api.services.compute.model.Instance; -import com.google.api.services.compute.model.InstanceList; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.SpecialPermission; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper; - +import java.io.Closeable; import java.io.IOException; import java.security.AccessController; import java.security.GeneralSecurityException; @@ -51,7 +31,26 @@ import java.util.Collections; import java.util.List; import java.util.function.Function; -public class GceInstancesServiceImpl extends AbstractLifecycleComponent implements GceInstancesService { +import com.google.api.client.googleapis.compute.ComputeCredential; +import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.client.json.JsonFactory; +import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceList; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import 
org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper; + +public class GceInstancesServiceImpl extends AbstractComponent implements GceInstancesService, Closeable { // all settings just used for testing - not registered by default public static final Setting<Boolean> GCE_VALIDATE_CERTIFICATES = @@ -113,7 +112,6 @@ public class GceInstancesServiceImpl extends AbstractLifecycleComponent implemen private final boolean validateCerts; - @Inject public GceInstancesServiceImpl(Settings settings) { super(settings); this.project = PROJECT_SETTING.get(settings); @@ -204,22 +202,9 @@ public class GceInstancesServiceImpl extends AbstractLifecycleComponent implemen } @Override - protected void doStart() throws ElasticsearchException { - } - - @Override - protected void doStop() throws ElasticsearchException { + public void close() throws IOException { if (gceHttpTransport != null) { - try { - gceHttpTransport.shutdown(); - } catch (IOException e) { - logger.warn("unable to shutdown GCE Http Transport", e); - } - gceHttpTransport = null; + gceHttpTransport.shutdown(); } } - - @Override - protected void doClose() throws ElasticsearchException { - } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java index 25b7cf72c92..5145dc2a51c 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java @@ -19,19 +19,6 @@ package org.elasticsearch.cloud.gce; -import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; -import com.google.api.client.http.GenericUrl; -import com.google.api.client.http.HttpHeaders; -import com.google.api.client.http.HttpResponse; -import com.google.api.client.http.HttpTransport; -import org.elasticsearch.SpecialPermission; -import org.elasticsearch.cloud.gce.network.GceNameResolver; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; - import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -41,6 +28,16 @@ import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; import java.util.function.Function; +import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpTransport; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; + public class GceMetadataService extends AbstractLifecycleComponent { // Forcing Google Token API URL as set in GCE SDK to @@ -53,7 +50,6 @@ public class GceMetadataService extends AbstractLifecycleComponent { /** Global instance of the HTTP
transport. */ private HttpTransport gceHttpTransport; - @Inject public GceMetadataService(Settings settings) { super(settings); } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 3426e74d4a4..4fc4bc418b1 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -19,6 +19,13 @@ package org.elasticsearch.discovery.gce; +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; + import com.google.api.services.compute.model.AccessConfig; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.NetworkInterface; @@ -29,7 +36,6 @@ import org.elasticsearch.cloud.gce.GceInstancesService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -37,23 +43,13 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.function.Function; - import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -/** - * - */ public class GceUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { /** @@ -78,7 +74,6 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas private long lastRefresh; private List<DiscoveryNode> cachedDiscoNodes; - @Inject public GceUnicastHostsProvider(Settings settings, GceInstancesService gceInstancesService, TransportService transportService, NetworkService networkService) { diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index aeec9911824..4d684a1b22c 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -22,23 +22,35 @@ package org.elasticsearch.plugin.discovery.gce; import com.google.api.client.http.HttpHeaders; import com.google.api.client.util.ClassInfo; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cloud.gce.GceInstancesService; +import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import
org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.GceModule; import org.elasticsearch.cloud.gce.network.GceNameResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.gce.GceUnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import java.io.Closeable; +import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; import java.util.ArrayList; @@ -46,12 +58,17 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.function.Supplier; -public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin { +public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable { public static final String GCE = "gce"; private final Settings settings; - protected final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class); + private static final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + // stashed when created in order to properly close + private final SetOnce<GceInstancesServiceImpl> gceInstancesService = new SetOnce<>(); static { /* @@ -81,23 +98,20 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin { } @Override - public Collection<Module> createGuiceModules() { - return Collections.singletonList(new GceModule(settings)); + public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ZenPing zenPing) { + // this is for backcompat with pre 5.1, where users would set discovery.type to use the gce hosts provider + return Collections.singletonMap(GCE, () -> + new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing)); } @Override - @SuppressWarnings("rawtypes") // Supertype uses raw type - public Collection<Class<? extends LifecycleComponent>> getGuiceServiceClasses() { - logger.debug("Register gce compute service"); - Collection<Class<? extends LifecycleComponent>> services = new ArrayList<>(); - services.add(GceModule.getComputeServiceImpl()); - return services; - } - - public void onModule(DiscoveryModule discoveryModule) { - logger.debug("Register gce discovery type and gce unicast provider"); - discoveryModule.addDiscoveryType(GCE, ZenDiscovery.class); - discoveryModule.addUnicastHostProvider(GCE, GceUnicastHostsProvider.class); + public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService, + NetworkService networkService) { + return Collections.singletonMap(GCE, () -> { + gceInstancesService.set(new GceInstancesServiceImpl(settings)); + return new GceUnicastHostsProvider(settings, gceInstancesService.get(),
transportService, networkService); + }); } @Override @@ -117,4 +131,25 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin { GceInstancesService.RETRY_SETTING, GceInstancesService.MAX_WAIT_SETTING); } + + @Override + public Settings additionalSettings() { + // For 5.0, the hosts provider was "gce", but this was before the discovery.zen.hosts_provider + // setting existed. This check looks for the legacy setting, and sets the hosts provider if not already set + String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings); + if (discoveryType.equals(GCE)) { + deprecationLogger.deprecated("Using " + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + " setting to set hosts provider is deprecated. " + + "Set \"" + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey() + ": " + GCE + "\" instead"); + if (DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.exists(settings) == false) { + return Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), GCE).build(); + } + } + return Settings.EMPTY; + } + + @Override + public void close() throws IOException { + IOUtils.close(gceInstancesService.get()); + } } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java index 8ce17ff9fa5..3af39b6da5d 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class DiscoveryGceClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index 98f6fd0dc1b..1512da2429f 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -59,7 +59,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -@ESIntegTestCase.SuppressLocalMode @ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) @SuppressForbidden(reason = "use http server") // TODO this should be an IT but currently all ITs in this project run against a real cluster diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index 08e7ee963d3..886222d43a7 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -33,6 +33,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -94,13 +95,13 @@ public class GceDiscoveryTests extends ESTestCase { @Before
public void createTransportService() { - transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); } @After - public void stopGceComputeService() { + public void stopGceComputeService() throws IOException { if (mock != null) { - mock.stop(); + mock.close(); } } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceInstancesServiceMock.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceInstancesServiceMock.java index 91a37a4d106..d2612ca75ab 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceInstancesServiceMock.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceInstancesServiceMock.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.security.GeneralSecurityException; -/** - * - */ public class GceInstancesServiceMock extends GceInstancesServiceImpl { protected HttpTransport mockHttpTransport; diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 3d9545d7aed..f22e0fdfeae 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -24,9 +24,9 @@ esplugin { versions << [ 'tika': '1.13', - 'pdfbox': '2.0.1', - 'bouncycastle': '1.54', - 'poi': '3.15-beta1' + 'pdfbox': '2.0.3', + 'bouncycastle': '1.55', + 'poi': '3.15' ] dependencies { @@ -216,7 +216,6 @@ thirdPartyAudit.excludes = [ 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1VTEditList', 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ValList', 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1ValidIdsList', - 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1VisibleList', 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$1WidthMinList', 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2Accel2List', 'com.microsoft.schemas.office.excel.impl.CTClientDataImpl$2AccelList', @@ -506,6 +505,8 @@ thirdPartyAudit.excludes = [ 'opennlp.tools.namefind.TokenNameFinderModel', 'opennlp.tools.util.Span', 'org.apache.avalon.framework.logger.Logger', + 'org.apache.commons.collections4.ListValuedMap', + 'org.apache.commons.collections4.multimap.ArrayListValuedHashMap', 'org.apache.commons.csv.CSVFormat', 'org.apache.commons.csv.CSVParser', 'org.apache.commons.csv.CSVRecord', @@ -630,8 +631,6 @@ thirdPartyAudit.excludes = [ 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1RevocationValuesList', 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SigAndRefsTimeStampList', 'org.etsi.uri.x01903.v13.impl.UnsignedSignaturePropertiesTypeImpl$1SignatureTimeStampList', - 'org.etsi.uri.x01903.v14.ValidationDataType$Factory', - 'org.etsi.uri.x01903.v14.ValidationDataType', 'org.json.JSONArray', 'org.json.JSONObject', 'org.json.XML', @@ -785,17 +784,16 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.drawingml.x2006.main.CTGlowEffect', 'org.openxmlformats.schemas.drawingml.x2006.main.CTGrayscaleEffect', 'org.openxmlformats.schemas.drawingml.x2006.main.CTGrayscaleTransform', - 'org.openxmlformats.schemas.drawingml.x2006.main.CTGroupFillProperties', 'org.openxmlformats.schemas.drawingml.x2006.main.CTGroupLocking', 'org.openxmlformats.schemas.drawingml.x2006.main.CTHSLEffect', 'org.openxmlformats.schemas.drawingml.x2006.main.CTInnerShadowEffect', 
'org.openxmlformats.schemas.drawingml.x2006.main.CTInverseGammaTransform', 'org.openxmlformats.schemas.drawingml.x2006.main.CTInverseTransform', 'org.openxmlformats.schemas.drawingml.x2006.main.CTLineJoinBevel', + 'org.openxmlformats.schemas.drawingml.x2006.main.CTLineJoinMiterProperties', 'org.openxmlformats.schemas.drawingml.x2006.main.CTLuminanceEffect', 'org.openxmlformats.schemas.drawingml.x2006.main.CTObjectStyleDefaults', 'org.openxmlformats.schemas.drawingml.x2006.main.CTPath2DArcTo', - 'org.openxmlformats.schemas.drawingml.x2006.main.CTPatternFillProperties', 'org.openxmlformats.schemas.drawingml.x2006.main.CTPolarAdjustHandle', 'org.openxmlformats.schemas.drawingml.x2006.main.CTPositiveFixedAngle', 'org.openxmlformats.schemas.drawingml.x2006.main.CTPresetShadowEffect', @@ -808,7 +806,7 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.drawingml.x2006.main.CTSoftEdgesEffect', 'org.openxmlformats.schemas.drawingml.x2006.main.CTSupplementalFont', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTableBackgroundStyle', - 'org.openxmlformats.schemas.drawingml.x2006.main.CTTablePartStyle', + 'org.openxmlformats.schemas.drawingml.x2006.main.CTTableCellBorderStyle', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBlipBullet', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletColorFollowText', 'org.openxmlformats.schemas.drawingml.x2006.main.CTTextBulletSizeFollowText', @@ -823,9 +821,10 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.drawingml.x2006.main.STBlackWhiteMode', 'org.openxmlformats.schemas.drawingml.x2006.main.STBlipCompression', 'org.openxmlformats.schemas.drawingml.x2006.main.STFixedAngle', - 'org.openxmlformats.schemas.drawingml.x2006.main.STGuid', 'org.openxmlformats.schemas.drawingml.x2006.main.STPanose', 'org.openxmlformats.schemas.drawingml.x2006.main.STPathFillMode', + 'org.openxmlformats.schemas.drawingml.x2006.main.STPresetPatternVal', + 'org.openxmlformats.schemas.drawingml.x2006.main.STPresetPatternVal$Enum', 'org.openxmlformats.schemas.drawingml.x2006.main.STRectAlignment', 'org.openxmlformats.schemas.drawingml.x2006.main.STTextColumnCount', 'org.openxmlformats.schemas.drawingml.x2006.main.STTextNonNegativePoint', @@ -933,6 +932,34 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1SatOffList', 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1ShadeList', 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTPresetColorImpl$1TintList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1AlphaList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1AlphaModList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1AlphaOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1BlueList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1BlueModList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1BlueOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1CompList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1GammaList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1GrayList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1GreenList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1GreenModList', + 
'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1GreenOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1HueList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1HueModList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1HueOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1InvGammaList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1InvList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1LumList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1LumModList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1LumOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1RedList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1RedModList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1RedOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1SatList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1SatModList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1SatOffList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1ShadeList', + 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTScRgbColorImpl$1TintList', 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1AlphaList', 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1AlphaModList', 'org.openxmlformats.schemas.drawingml.x2006.main.impl.CTSRgbColorImpl$1AlphaOffList', @@ -1184,6 +1211,8 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleObjects', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTOleSize', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPCDKPIs', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPageField', + 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPageFields', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPhoneticRun', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotFilters', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.CTPivotHierarchies', @@ -1269,7 +1298,6 @@ thirdPartyAudit.excludes = [ 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTMergeCellsImpl$1MergeCellList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTNumFmtsImpl$1NumFmtList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPageBreakImpl$1BrkList', - 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPageFieldsImpl$1PageFieldList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotCacheRecordsImpl$1RList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotCachesImpl$1PivotCacheList', 'org.openxmlformats.schemas.spreadsheetml.x2006.main.impl.CTPivotFieldsImpl$1PivotFieldList', diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.54.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.54.jar.sha1 deleted file mode 100644 index 79da45c5c42..00000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.54.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d9b5432b4b29ef4a853223bc6e19379ef116cca \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.55.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.55.jar.sha1 new file mode 100644 index 
00000000000..8fdfb8f5807 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.55.jar.sha1 @@ -0,0 +1 @@ +5cea2dada69b98698ea975a5c1dd3c91ac8ffbb6 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.54.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.54.jar.sha1 deleted file mode 100644 index 2d0c3cf4e27..00000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.54.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b11bfee99bb11eea344de6e4a07fe89212c55c02 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.55.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.55.jar.sha1 new file mode 100644 index 00000000000..a4d546be04f --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.55.jar.sha1 @@ -0,0 +1 @@ +6392d8cba22b722c6570d660ca0b3921ff1bae4f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.54.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.54.jar.sha1 deleted file mode 100644 index fcda646b42a..00000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.54.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1acdedeb89f1d950d67b73d481eb7736df65eedb \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.55.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.55.jar.sha1 new file mode 100644 index 00000000000..1c507e17b88 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.55.jar.sha1 @@ -0,0 +1 @@ +935f2e57a00ec2c489cbd2ad830d4a399708f979 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.1.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.1.jar.sha1 deleted file mode 100644 index 0668199b242..00000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b9d4f0993e015f3f1ce0be9e7300cf62dd7a7f15 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.3.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.3.jar.sha1 new file mode 100644 index 00000000000..e3ff3d39459 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.3.jar.sha1 @@ -0,0 +1 @@ +448ee588d0136121cf5c4dd397384cccb9db1ad7 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.1.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.1.jar.sha1 deleted file mode 100644 index 1014db34044..00000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dbc69649118b7eff278f228c070a40ee559e1f62 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.3.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.3.jar.sha1 new file mode 100644 index 00000000000..807e2482ac2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.3.jar.sha1 @@ -0,0 +1 @@ +be7b09de93f7c7795c57f4fbf14db60ab93806b4 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-3.15-beta1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-3.15-beta1.jar.sha1 deleted file mode 100644 index 6049604dd97..00000000000 --- a/plugins/ingest-attachment/licenses/poi-3.15-beta1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -048bb8326b81323631d9ceb4236cfbd382e56da2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 new file mode 100644 index 
00000000000..5405d32cd01 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-3.15.jar.sha1 @@ -0,0 +1 @@ +965bba8899988008bb2341e300347de62aad5391 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-3.15-beta1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-3.15-beta1.jar.sha1 deleted file mode 100644 index c3cf49d9246..00000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-3.15-beta1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81085a47fdf0d74d473d605c6b3784e26731842e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 new file mode 100644 index 00000000000..4362223dac4 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-3.15.jar.sha1 @@ -0,0 +1 @@ +e2800856735b07b8edd417aee07685470216a00f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha1 deleted file mode 100644 index afd3b676d08..00000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15-beta1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8bc979ad79908a99483337f1ca2edf78558ac20 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 new file mode 100644 index 00000000000..393e6f885d9 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-3.15.jar.sha1 @@ -0,0 +1 @@ +de4a50ca39de48a19606b35644ecadb2f733c479 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-3.15-beta1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-3.15-beta1.jar.sha1 deleted file mode 100644 index 7056a9fa49e..00000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-3.15-beta1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4e276aaf97a60a1156388c9e38069122b7ea914 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 new file mode 100644 index 00000000000..d08f475a3f6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-3.15.jar.sha1 @@ -0,0 +1 @@ +f1db76ae4a9389fa4339dc3b7f8208aa82c72b04 \ No newline at end of file diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java index 4e051d05724..3708a290dec 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java @@ -21,8 +21,8 @@ package org.elasticsearch.ingest.attachment; import org.apache.commons.io.IOUtils; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -38,6 +38,7 @@ import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static 
org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -125,12 +126,17 @@ public class AttachmentProcessorTests extends ESTestCase { is("application/vnd.openxmlformats-officedocument.wordprocessingml.document")); } + public void testPdf() throws Exception { + Map<String, Object> attachmentData = parseDocument("test.pdf", processor); + assertThat(attachmentData.get("content"), + is("This is a test, with umlauts, from München\n\nAlso contains newlines for testing.\n\nAnd one more.")); + assertThat(attachmentData.get("content_type").toString(), is("application/pdf")); + assertThat(attachmentData.get("content_length"), is(notNullValue())); + } + public void testEncryptedPdf() throws Exception { - try { - parseDocument("encrypted.pdf", processor); - } catch (ElasticsearchParseException e) { - assertThat(e.getDetailedMessage(), containsString("document is encrypted")); - } + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> parseDocument("encrypted.pdf", processor)); + assertThat(e.getDetailedMessage(), containsString("document is encrypted")); } public void testHtmlDocument() throws Exception { diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java index d720a4abf28..40e95451e49 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class IngestAttachmentClientYamlTestSuiteIT extends ESClientYamlSuiteTest @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/test.pdf b/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/test.pdf new file mode 100644 index 00000000000..c6e47d74e66 Binary files /dev/null and b/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/test.pdf differ diff --git a/plugins/ingest-geoip/build.gradle b/plugins/ingest-geoip/build.gradle index d9e90a61d40..e74a27844db 100644 --- a/plugins/ingest-geoip/build.gradle +++ b/plugins/ingest-geoip/build.gradle @@ -23,10 +23,10 @@ esplugin { } dependencies { - compile ('com.maxmind.geoip2:geoip2:2.7.0') + compile ('com.maxmind.geoip2:geoip2:2.8.0') // geoip2 dependencies: - compile('com.fasterxml.jackson.core:jackson-annotations:2.7.1') - compile('com.fasterxml.jackson.core:jackson-databind:2.7.1') + compile('com.fasterxml.jackson.core:jackson-annotations:2.8.2') + compile('com.fasterxml.jackson.core:jackson-databind:2.8.2') compile('com.maxmind.db:maxmind-db:1.2.1') testCompile 'org.elasticsearch:geolite2-databases:20160608' @@ -50,14 +50,19 @@ bundlePlugin { } thirdPartyAudit.excludes = [ - // geoip WebServiceClient needs Google http client, but we're not using WebServiceClient: - 'com.google.api.client.http.HttpTransport', - 'com.google.api.client.http.GenericUrl', - 'com.google.api.client.http.HttpResponse', - 'com.google.api.client.http.HttpRequestFactory', - 'com.google.api.client.http.HttpRequest', -
'com.google.api.client.http.HttpHeaders', - 'com.google.api.client.http.HttpResponseException', - 'com.google.api.client.http.javanet.NetHttpTransport', - 'com.google.api.client.http.javanet.NetHttpTransport', + // geoip WebServiceClient needs apache http client, but we're not using WebServiceClient: + 'org.apache.http.HttpEntity', + 'org.apache.http.HttpHost', + 'org.apache.http.HttpResponse', + 'org.apache.http.StatusLine', + 'org.apache.http.auth.UsernamePasswordCredentials', + 'org.apache.http.client.config.RequestConfig$Builder', + 'org.apache.http.client.config.RequestConfig', + 'org.apache.http.client.methods.CloseableHttpResponse', + 'org.apache.http.client.methods.HttpGet', + 'org.apache.http.client.utils.URIBuilder', + 'org.apache.http.impl.auth.BasicScheme', + 'org.apache.http.impl.client.CloseableHttpClient', + 'org.apache.http.impl.client.HttpClientBuilder', + 'org.apache.http.util.EntityUtils' ] diff --git a/plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 b/plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 deleted file mode 100644 index 2015e311d60..00000000000 --- a/plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2010d922191f5801939b462a5703ab79a7829626 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 b/plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 new file mode 100644 index 00000000000..c6036686601 --- /dev/null +++ b/plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 @@ -0,0 +1 @@ +46226778ec32b776e80f282c5bf65b88d36cc0a0 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 deleted file mode 100644 index 69b45742d84..00000000000 --- a/plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b93f301823b79033fcbe873779b3d84f9730fc1 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 new file mode 100644 index 00000000000..c3b701dbb86 --- /dev/null +++ b/plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 @@ -0,0 +1 @@ +a38d544583e90cf163b2e45e4a57f5c54de670d3 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 deleted file mode 100644 index d9b4ca6a79b..00000000000 --- a/plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14d88822bca655de7aa6ed3e4c498d115505710a \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 new file mode 100644 index 00000000000..7e90b5f8e97 --- /dev/null +++ b/plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 @@ -0,0 +1 @@ +1f12816593c1422be957471c98a80bfbace60fa2 \ No newline at end of file diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java index 26838b600da..ed381dab0b6 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class 
IngestGeoIpClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java b/plugins/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java index b0aa115a1a2..2acac873637 100644 --- a/plugins/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java +++ b/plugins/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/IngestUserAgentClientYamlTestSuiteIT.java @@ -36,6 +36,6 @@ public class IngestUserAgentClientYamlTestSuiteC @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java index 68ec25f2205..88ff7609ff9 100644 --- a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java +++ b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.cat.AbstractCatAction; @@ -45,21 +44,18 @@ public class ExampleCatAction extends AbstractCatAction { } @Override - protected void doRequest(final RestRequest request, final RestChannel channel, final NodeClient client) { + protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { Table table = getTableWithHeader(request); table.startRow(); table.addCell(config.getTestConfig()); table.endRow(); - try { - channel.sendResponse(RestTable.buildResponse(table, channel)); - } catch (Exception e) { + return channel -> { try { + channel.sendResponse(RestTable.buildResponse(table, channel)); + } catch (final Exception e) { channel.sendResponse(new BytesRestResponse(channel, e)); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.error("failed to send failure response", inner); } - } + }; } @Override diff --git a/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/JvmExampleClientYamlTestSuiteIT.java b/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/JvmExampleClientYamlTestSuiteIT.java index 0ef413d9595..b7bae90817b 100644 --- a/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/JvmExampleClientYamlTestSuiteIT.java +++ b/plugins/jvm-example/src/test/java/org/elasticsearch/plugin/example/JvmExampleClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class JvmExampleClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return
ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java index da3d14cd02b..6af04561e4b 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java @@ -25,7 +25,6 @@ import org.elasticsearch.SpecialPermission; import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ClassPermission; import org.elasticsearch.script.CompiledScript; @@ -61,9 +60,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; -/** - * - */ public class JavaScriptScriptEngineService extends AbstractComponent implements ScriptEngineService { public static final String NAME = "javascript"; diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeList.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeList.java index dc533e5881e..4ef7f9177c5 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeList.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/support/NativeList.java @@ -27,9 +27,6 @@ import org.mozilla.javascript.Wrapper; import java.util.Arrays; import java.util.List; -/** - * - */ public class NativeList extends NativeJavaObject implements Scriptable, Wrapper { private static final String LENGTH_PROPERTY = "length"; diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java index 84bc97abfbe..dab2dac6c52 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; @@ -39,9 +39,6 @@ import org.junit.Before; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -/** - * - */ public class JavaScriptScriptEngineTests extends ESTestCase { private JavaScriptScriptEngineService se; @@ -57,12 +54,12 @@ public class JavaScriptScriptEngineTests extends ESTestCase { public void testSimpleEquation() { Map<String, Object> vars = new HashMap<String, Object>(); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", se.compile(null, "1 + 2", Collections.emptyMap())), vars).run(); + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testSimpleEquation", "js", se.compile(null, "1 + 2",
Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(3)); } public void testNullVars() { - CompiledScript script = new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", + CompiledScript script = new CompiledScript(ScriptType.INLINE, "testSimpleEquation", "js", se.compile(null, "1 + 2", emptyMap())); Object o = se.executable(script, null).run(); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -75,20 +72,20 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map<String, Object> obj2 = MapBuilder.<String, Object>newMapBuilder().put("prop2", "value2").map(); Map<String, Object> obj1 = MapBuilder.<String, Object>newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile(null, "obj1", Collections.emptyMap())), vars).run(); + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testMapAccess", "js", se.compile(null, "obj1", Collections.emptyMap())), vars).run(); assertThat(o, instanceOf(Map.class)); obj1 = (Map<String, Object>) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map<String, Object>) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile(null, "obj1.l[0]", Collections.emptyMap())), vars).run(); + o = se.executable(new CompiledScript(ScriptType.INLINE, "testMapAccess", "js", se.compile(null, "obj1.l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("2")); } @SuppressWarnings("unchecked") public void testJavaScriptObjectToMap() { Map<String, Object> vars = new HashMap<String, Object>(); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectToMap", "js", + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testJavaScriptObjectToMap", "js", se.compile(null, "var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1", Collections.emptyMap())), vars).run(); Map<String, Object> obj1 = (Map<String, Object>) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); @@ -104,7 +101,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { ctx.put("obj1", obj1); vars.put("ctx", ctx); - ExecutableScript executable = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectMapInter", "js", + ExecutableScript executable = se.executable(new CompiledScript(ScriptType.INLINE, "testJavaScriptObjectMapInter", "js", se.compile(null, "ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'", Collections.emptyMap())), vars); executable.run(); ctx = (Map<String, Object>) executable.unwrap(vars.get("ctx")); @@ -121,7 +118,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { ctx.put("doc", doc); Object compiled = se.compile(null, "ctx.doc.field1 = ['value1', 'value2']", Collections.emptyMap()); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptInnerArrayCreation", "js", + ExecutableScript script = se.executable(new CompiledScript(ScriptType.INLINE, "testJavaScriptInnerArrayCreation", "js", compiled), new HashMap<String, Object>()); script.setNextVar("ctx", ctx); script.run(); @@ -138,21 +135,21 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map<String, Object> obj1 = MapBuilder.<String, Object>newMapBuilder().put("prop1", "value1").put("obj2", obj2).map(); vars.put("l", Arrays.asList("1", "2", "3", obj1)); - Object o = se.executable(new
CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessInScript", "js", se.compile(null, "l.length", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(4)); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessInScript", "js", se.compile(null, "l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("1")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessInScript", "js", se.compile(null, "l[3]", Collections.emptyMap())), vars).run(); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", + o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessInScript", "js", se.compile(null, "l[3].prop1", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("value1")); } @@ -163,7 +160,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { vars.put("ctx", ctx); Object compiledScript = se.compile(null, "ctx.value", Collections.emptyMap()); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "js", + ExecutableScript script = se.executable(new CompiledScript(ScriptType.INLINE, "testChangingVarsCrossExecution1", "js", compiledScript), vars); ctx.put("value", 1); Object o = script.run(); @@ -178,7 +175,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map vars = new HashMap(); Object compiledScript = se.compile(null, "value", Collections.emptyMap()); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "js", + ExecutableScript script = se.executable(new CompiledScript(ScriptType.INLINE, "testChangingVarsCrossExecution2", "js", compiledScript), vars); script.setNextVar("value", 1); Object o = script.run(); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java index c3614952ecf..a6ce1c95a20 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -56,7 +56,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { Map vars = new HashMap(); vars.put("x", x); vars.put("y", y); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); + ExecutableScript script = se.executable(new 
CompiledScript(ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); for (int i = 0; i < between(100, 1000); i++) { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); @@ -95,7 +95,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { long x = Randomness.get().nextInt(); Map vars = new HashMap(); vars.put("x", x); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), vars); for (int i = 0; i < between(100, 1000); i++) { long y = Randomness.get().nextInt(); long addition = x + y; @@ -141,7 +141,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { long addition = x + y; runtimeVars.put("x", x); runtimeVars.put("y", y); - long result = ((Number) se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), runtimeVars).run()).longValue(); + long result = ((Number) se.executable(new CompiledScript(ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), runtimeVars).run()).longValue(); assertThat(result, equalTo(addition)); } } catch (Exception e) { diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java index c1eb77d4e20..3928627840a 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.script.javascript; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.mozilla.javascript.EcmaError; import org.mozilla.javascript.WrappedException; @@ -54,7 +54,7 @@ public class JavaScriptSecurityTests extends ESTestCase { /** runs a script */ private void doTest(String script) { Map vars = new HashMap(); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(null, script, Collections.emptyMap())), vars).run(); + se.executable(new CompiledScript(ScriptType.INLINE, "test", "js", se.compile(null, script, Collections.emptyMap())), vars).run(); } /** asserts that a script runs without exception */ diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/LangJavascriptClientYamlTestSuiteIT.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/LangJavascriptClientYamlTestSuiteIT.java index e89372c8b36..300b2325863 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/LangJavascriptClientYamlTestSuiteIT.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/LangJavascriptClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class LangJavascriptClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } 
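This is the first of many YAML REST suites in this commit whose `@ParametersFactory` drops the `(0, 1)` arguments; `createParameters()` now takes no sharding parameters at all. A sketch of the resulting boilerplate, with a hypothetical plugin package and with constructor shape and import paths assumed from the 5.x test framework layout:

-------------------------------------
package org.elasticsearch.example; // hypothetical plugin package

import java.io.IOException;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;

public class ExampleClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public ExampleClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
        return ESClientYamlSuiteTestCase.createParameters(); // was createParameters(0, 1)
    }
}
-------------------------------------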
} diff --git a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java index d31e691b994..3674a280a6e 100644 --- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java +++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java @@ -48,9 +48,6 @@ import java.security.PrivilegedAction; import java.security.ProtectionDomain; import java.util.Map; -/** - * - */ //TODO we can optimize the case for Map similar to PyStringMap public class PythonScriptEngineService extends AbstractComponent implements ScriptEngineService { diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/LangPythonClientYamlTestSuiteIT.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/LangPythonClientYamlTestSuiteIT.java index 618ea6b20e5..a0b4452ba44 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/LangPythonClientYamlTestSuiteIT.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/LangPythonClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class LangPythonClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java index b96071e3848..f3bdd75a09a 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; @@ -36,9 +36,6 @@ import org.junit.Before; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -/** - * - */ public class PythonScriptEngineTests extends ESTestCase { private PythonScriptEngineService se; @@ -54,7 +51,7 @@ public class PythonScriptEngineTests extends ESTestCase { public void testSimpleEquation() { Map vars = new HashMap(); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "python", se.compile(null, "1 + 2", Collections.emptyMap())), vars).run(); + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testSimpleEquation", "python", se.compile(null, "1 + 2", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -65,13 +62,13 @@ public class PythonScriptEngineTests extends ESTestCase { Map obj2 = MapBuilder.newMapBuilder().put("prop2", "value2").map(); Map obj1 = MapBuilder.newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", 
"python", se.compile(null, "obj1", Collections.emptyMap())), vars).run(); + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testMapAccess", "python", se.compile(null, "obj1", Collections.emptyMap())), vars).run(); assertThat(o, instanceOf(Map.class)); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile(null, "obj1['l'][0]", Collections.emptyMap())), vars).run(); + o = se.executable(new CompiledScript(ScriptType.INLINE, "testMapAccess", "python", se.compile(null, "obj1['l'][0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("2")); } @@ -84,7 +81,7 @@ public class PythonScriptEngineTests extends ESTestCase { ctx.put("obj1", obj1); vars.put("ctx", ctx); - ExecutableScript executable = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testObjectInterMap", "python", + ExecutableScript executable = se.executable(new CompiledScript(ScriptType.INLINE, "testObjectInterMap", "python", se.compile(null, "ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'", Collections.emptyMap())), vars); executable.run(); ctx = (Map) executable.unwrap(vars.get("ctx")); @@ -104,15 +101,15 @@ public class PythonScriptEngineTests extends ESTestCase { // Object o = se.execute(se.compile("l.length"), vars); // assertThat(((Number) o).intValue(), equalTo(4)); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile(null, "l[0]", Collections.emptyMap())), vars).run(); + Object o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessListInScript", "python", se.compile(null, "l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("1")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile(null, "l[3]", Collections.emptyMap())), vars).run(); + o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessListInScript", "python", se.compile(null, "l[3]", Collections.emptyMap())), vars).run(); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile(null, "l[3]['prop1']", Collections.emptyMap())), vars).run(); + o = se.executable(new CompiledScript(ScriptType.INLINE, "testAccessListInScript", "python", se.compile(null, "l[3]['prop1']", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("value1")); } @@ -122,7 +119,7 @@ public class PythonScriptEngineTests extends ESTestCase { vars.put("ctx", ctx); Object compiledScript = se.compile(null, "ctx['value']", Collections.emptyMap()); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "python", compiledScript), vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptType.INLINE, "testChangingVarsCrossExecution1", "python", compiledScript), vars); ctx.put("value", 1); Object o = script.run(); assertThat(((Number) o).intValue(), equalTo(1)); @@ -137,7 +134,7 @@ public class PythonScriptEngineTests extends ESTestCase { Map ctx = new HashMap(); Object compiledScript = se.compile(null, "value", 
Collections.emptyMap()); - ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "python", compiledScript), vars); + ExecutableScript script = se.executable(new CompiledScript(ScriptType.INLINE, "testChangingVarsCrossExecution2", "python", compiledScript), vars); script.setNextVar("value", 1); Object o = script.run(); assertThat(((Number) o).intValue(), equalTo(1)); diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java index 0a887bc9a7e..940a66b43a5 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -35,15 +35,12 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class PythonScriptMultiThreadedTests extends ESTestCase { public void testExecutableNoRuntimeParams() throws Exception { final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); final Object compiled = se.compile(null, "x + y", Collections.emptyMap()); - final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "python", compiled); + final CompiledScript compiledScript = new CompiledScript(ScriptType.INLINE, "testExecutableNoRuntimeParams", "python", compiled); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[4]; @@ -129,7 +126,7 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { public void testExecute() throws Exception { final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); final Object compiled = se.compile(null, "x + y", Collections.emptyMap()); - final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecute", "python", compiled); + final CompiledScript compiledScript = new CompiledScript(ScriptType.INLINE, "testExecute", "python", compiled); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[4]; diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java index 7ad13b31986..262783dc00c 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.script.python; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import org.python.core.PyException; @@ -55,7 +55,7 @@ public class PythonSecurityTests extends ESTestCase { /** 
runs a script */ private void doTest(String script) { Map vars = new HashMap(); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "python", se.compile(null, script, Collections.emptyMap())), vars).run(); + se.executable(new CompiledScript(ScriptType.INLINE, "test", "python", se.compile(null, script, Collections.emptyMap())), vars).run(); } /** asserts that a script runs without exception */ diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java index 204f6c07a99..3e9a5f13927 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class MapperMurmur3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCas @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index e10fdb72ff7..72930344bbf 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -42,6 +43,7 @@ import org.junit.Before; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.function.Supplier; import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.hamcrest.Matchers.containsString; @@ -58,8 +60,11 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { mapperRegistry = new MapperRegistry( Collections.singletonMap(Murmur3FieldMapper.CONTENT_TYPE, new Murmur3FieldMapper.TypeParser()), Collections.emptyMap()); + Supplier queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); } @Override @@ -152,8 +157,11 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); IndexService indexService2x = createIndex("test_old", oldIndexSettings); + Supplier queryShardContext = () -> { + return indexService2x.newQueryShardContext(0, null, () 
-> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), indexService2x.getIndexAnalyzers(), - indexService2x.similarityService(), mapperRegistry, indexService2x::newQueryShardContext); + indexService2x.similarityService(), mapperRegistry, queryShardContext); DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, defaultMapper.mappingSource().string()); diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java index 44f26c4ec51..d8de3635b77 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class MapperSizeClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 05bb911476a..66671931683 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -38,9 +38,6 @@ import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.util.Map; -/** - * - */ public class AzureBlobContainer extends AbstractBlobContainer { protected final Logger logger = Loggers.getLogger(AzureBlobContainer.class); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index cd201e7ff56..697f05e3dee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -175,7 +175,7 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureS blobContainer.createIfNotExists(); } catch (IllegalArgumentException e) { logger.trace((Supplier) () -> new ParameterizedMessage("fails creating container [{}]", container), e); - throw new RepositoryException(container, e.getMessage()); + throw new RepositoryException(container, e.getMessage(), e); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java index 51b5eae57ae..ba2011c276e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java @@ -43,7 +43,7 @@ import java.util.concurrent.ConcurrentHashMap; */ public class AzureStorageServiceMock extends 
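The `Murmur3FieldMapperTests` hunk above swaps the bare `indexService::newQueryShardContext` method reference for an explicit `Supplier<QueryShardContext>`, since `newQueryShardContext` now needs a shard id, a reader, and a now-in-millis provider. The shape of the lambda, pulled out on its own (`indexService` is the test fixture from the hunk; everything else appears verbatim in the diff):

-------------------------------------
import java.util.function.Supplier;

import org.elasticsearch.index.query.QueryShardContext;

// Defer context creation; the third argument supplies "now", and throwing there
// guarantees these mapper tests never silently depend on the current time.
Supplier<QueryShardContext> queryShardContext =
        () -> indexService.newQueryShardContext(0, null, () -> {
            throw new UnsupportedOperationException();
        });
-------------------------------------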
AbstractComponent implements AzureStorageService { - protected Map blobs = new ConcurrentHashMap<>(); + protected final Map blobs = new ConcurrentHashMap<>(); public AzureStorageServiceMock() { super(Settings.EMPTY); @@ -94,7 +94,7 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS @Override public Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) { MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); - for (String blobName : blobs.keySet()) { + blobs.forEach((String blobName, ByteArrayOutputStream bos) -> { final String checkBlob; if (keyPath != null && !keyPath.isEmpty()) { // strip off key path from the beginning of the blob name @@ -103,9 +103,9 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS checkBlob = blobName; } if (prefix == null || startsWithIgnoreCase(checkBlob, prefix)) { - blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, blobs.get(blobName).size())); + blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, bos.size())); } - } + }); return blobsBuilder.immutableMap(); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java index 9ac15cae5a2..808ae13e678 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreServiceIntegTests.java @@ -28,11 +28,14 @@ import org.elasticsearch.cloud.azure.AbstractAzureRepositoryServiceIntegTestCase import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope( scope = ESIntegTestCase.Scope.SUITE, @@ -70,14 +73,20 @@ public class AzureSnapshotRestoreServiceIntegTests extends AbstractAzureReposito assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().totalHits(), equalTo(100L)); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx-*", "-test-idx-3") + .get(); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), - equalTo(SnapshotState.SUCCESS)); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + if (snapshotInfo.shardFailures() != null) { + for (SnapshotShardFailure shardFailure : 
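In the `AzureStorageServiceMock` hunk above, iterating `blobs.keySet()` and calling `blobs.get(blobName)` inside the loop costs a second hash lookup per entry; `Map.forEach` hands over key and value together. The same refactor in isolation, against plain JDK types:

-------------------------------------
import java.io.ByteArrayOutputStream;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

Map<String, ByteArrayOutputStream> blobs = new ConcurrentHashMap<>();

// before: for (String name : blobs.keySet()) { long size = blobs.get(name).size(); ... }
// after: one traversal, no per-key get()
blobs.forEach((name, bos) -> System.out.println(name + " -> " + bos.size() + " bytes"));
-------------------------------------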
snapshotInfo.shardFailures()) { + logger.warn("shard failure during snapshot: {}", shardFailure::toString); + } + } + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + assertEquals(snapshotInfo.failedShards(), 0); logger.info("--> delete some data"); for (int i = 0; i < 50; i++) { diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 014014b432c..d1050e80adc 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.azure.AzureRepository.Repository; @@ -75,14 +74,6 @@ public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyIntegT return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - // In snapshot tests, we explicitly disable cloud discovery - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") - .build(); - } - @Override public Settings indexSettings() { // During restore we frequently restore index to exactly the same state it was before, that might cause the same diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java index c7eeff27401..2919d073978 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestC @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 4fe8c718345..091c92ffd31 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/common/blobstore/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.util.Locale; public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { - @Override protected BlobStore newBlobStore() throws IOException { String bucket = 
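The rewritten Azure snapshot assertion logs every shard failure before asserting overall success, so a red run reports the cause rather than a bare state mismatch. One nit worth noting in passing: JUnit's convention is `assertEquals(expected, actual)`, so `assertEquals(0, snapshotInfo.failedShards())` would give a clearer failure message than the argument order used above. The guard pattern, condensed from the hunk:

-------------------------------------
final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
if (snapshotInfo.shardFailures() != null) {
    for (SnapshotShardFailure shardFailure : snapshotInfo.shardFailures()) {
        // surface the reason while the test still has it in hand
        logger.warn("shard failure during snapshot: {}", shardFailure);
    }
}
assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
assertEquals(0, snapshotInfo.failedShards());
-------------------------------------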
randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java index 52145bf87e2..6ed036e277f 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class RepositoryGcsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCas @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java index 1dfbb3c51b7..264a350d514 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/RepositoryHdfsClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class RepositoryHdfsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/repository-s3/config/repository-s3/log4j2.properties b/plugins/repository-s3/config/repository-s3/log4j2.properties index 3fee57ce3e2..aa52f0232e0 100644 --- a/plugins/repository-s3/config/repository-s3/log4j2.properties +++ b/plugins/repository-s3/config/repository-s3/log4j2.properties @@ -4,5 +4,5 @@ logger.com_amazonaws.level = warn logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error -logger_com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics -logger_com_amazonaws_metrics_AwsSdkMetrics.level = error +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java index 59c3d3445a2..cc8d43868d5 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.settings.Settings; import java.util.Locale; import java.util.function.Function; -/** - * - */ public interface AwsS3Service extends LifecycleComponent { // Global AWS settings (shared between discovery-ec2 and repository-s3) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index a9091788f28..5a8e83b33e6 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ 
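The repository-s3 logging change above is a one-character-class fix with real effect: log4j2's properties format keys every logger component as `logger.<id>.<attribute>`, so the old `logger_com_amazonaws_metrics_AwsSdkMetrics.*` keys were never parsed as a logger definition and the intended `error` level for the AWS SDK metrics class was silently ignored. For reference, the corrected pair reads:

-------------------------------------
logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
logger.com_amazonaws_metrics_AwsSdkMetrics.level = error
-------------------------------------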
b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -42,9 +42,6 @@ import java.util.Map; import static org.elasticsearch.repositories.s3.S3Repository.getValue; -/** - * - */ public class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service { /** diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStore.java index 991e5f9707f..6a03b4bcea5 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStore.java @@ -41,9 +41,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import java.util.ArrayList; import java.util.Locale; -/** - * - */ public class S3BlobStore extends AbstractComponent implements BlobStore { private final AmazonS3 client; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java index f549b5c3949..0fc3d28555b 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import com.amazonaws.util.json.Jackson; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cloud.aws.AwsS3Service; import org.elasticsearch.cloud.aws.InternalAwsS3Service; @@ -42,8 +43,6 @@ import org.elasticsearch.repositories.s3.S3Repository; */ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { - // ClientConfiguration clinit has some classloader problems - // TODO: fix that static { SecurityManager sm = System.getSecurityManager(); if (sm != null) { @@ -53,6 +52,10 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { @Override public Void run() { try { + // kick jackson to do some static caching of declared members info + Jackson.jsonNodeOf("{}"); + // ClientConfiguration clinit has some classloader problems + // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); } catch (ClassNotFoundException e) { throw new RuntimeException(e); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index b1471e417f9..7310b527158 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -101,14 +102,27 @@ public class S3Repository extends BlobStoreRepository { */ Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope); + + /** + * Default is to use 100MB (S3 
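The `S3RepositoryPlugin` static block above runs inside `AccessController.doPrivileged` so that classes doing security-sensitive work in their static initializers (Jackson's member-info caching, the AWS `ClientConfiguration` classloader lookups) are initialized while the plugin's own permissions are in effect, not later on some unprivileged call stack. A stripped-down sketch of the idiom (the wrapping class name is illustrative; the calls are those used in the hunk):

-------------------------------------
import java.security.AccessController;
import java.security.PrivilegedAction;

import org.elasticsearch.SpecialPermission;

public class EagerInitSketch { // illustrative class name

    static {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            // verify the caller may perform security-sensitive operations
            sm.checkPermission(new SpecialPermission());
        }
        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
            try {
                // force static initialization now, while the plugin's permissions apply
                Class.forName("com.amazonaws.ClientConfiguration");
            } catch (ClassNotFoundException e) {
                throw new RuntimeException(e);
            }
            return null;
        });
    }
}
-------------------------------------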
defaults) for heaps above 2GB and 5% of + * the available memory for smaller heaps. + */ + ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue( + Math.max( + ByteSizeUnit.MB.toBytes(5), // minimum value + Math.min( + ByteSizeUnit.MB.toBytes(100), + JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)), + ByteSizeUnit.BYTES); + /** * repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the - use of the Multipart API and may result in upload errors. Defaults to 100m. + use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size. */ Setting BUFFER_SIZE_SETTING = - Setting.byteSizeSetting("repositories.s3.buffer_size", new ByteSizeValue(100, ByteSizeUnit.MB), + Setting.byteSizeSetting("repositories.s3.buffer_size", DEFAULT_BUFFER_SIZE, new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope); /** * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3. @@ -195,12 +209,13 @@ public class S3Repository extends BlobStoreRepository { * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING */ Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false); + /** * buffer_size * @see Repositories#BUFFER_SIZE_SETTING */ Setting BUFFER_SIZE_SETTING = - Setting.byteSizeSetting("buffer_size", new ByteSizeValue(100, ByteSizeUnit.MB), + Setting.byteSizeSetting("buffer_size", Repositories.DEFAULT_BUFFER_SIZE, + new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB)); /** * max_retries @@ -306,11 +321,12 @@ public class S3Repository extends BlobStoreRepository { String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); if (Strings.hasLength(basePath)) { - BlobPath path = new BlobPath(); - for(String elem : basePath.split("/")) { - path = path.add(elem); + if (basePath.startsWith("/")) { + basePath = basePath.substring(1); + deprecationLogger.deprecated("S3 repository base_path trimming the leading `/`, and " + + "leading `/` will not be supported for the S3 repository in future releases"); } - this.basePath = path; + this.basePath = new BlobPath().add(basePath); } else { this.basePath = BlobPath.cleanPath(); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java index 7cda9ee0947..23c6755652e 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java @@ -117,9 +117,6 @@ import java.net.URL; import java.util.Date; import java.util.List; -/** - * - */ @SuppressForbidden(reason = "implements AWS api that uses java.io.File!") public class AmazonS3Wrapper implements AmazonS3 { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java index 37087db386b..33458c0ab7c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java +++
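Put as arithmetic, the new default is `max(5MB, min(100MB, heapMax / 20))`: 5% of the maximum heap, floored at S3's 5MB multipart minimum and capped at the old 100MB default, so any heap of 2GB or more still gets 100MB. A plain-`long` sketch of the same computation (the method name is illustrative):

-------------------------------------
// mirrors the DEFAULT_BUFFER_SIZE expression above using raw byte counts
static long defaultBufferSizeBytes(long heapMaxBytes) {
    long fivePercentOfHeap = heapMaxBytes / 20;
    long capped = Math.min(100L * 1024 * 1024, fivePercentOfHeap); // never above 100MB
    return Math.max(5L * 1024 * 1024, capped);                     // never below S3's 5MB minimum
}

// defaultBufferSizeBytes(1L << 30)  == ~51MB  (1GB heap)
// defaultBufferSizeBytes(2L << 30)  == 100MB  (2GB heap, capped)
// defaultBufferSizeBytes(64L << 20) == 5MB    (64MB heap, floored)
-------------------------------------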
b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java @@ -44,9 +44,6 @@ import java.util.concurrent.atomic.AtomicLong; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; -/** - * - */ public class TestAmazonS3 extends AmazonS3Wrapper { protected final Logger logger = Loggers.getLogger(getClass()); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 04c4f6fc0f1..9c567a570fe 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class RepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index f8940c6158c..14595d13448 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import static org.elasticsearch.repositories.s3.S3Repository.Repositories; import static org.elasticsearch.repositories.s3.S3Repository.Repository; @@ -104,4 +105,25 @@ public class S3RepositoryTests extends ESTestCase { Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, new DummyS3Service())); assertThat(e.getMessage(), containsString(msg)); } + + public void testBasePathSetting() throws IOException { + RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() + .put(Repository.BASE_PATH_SETTING.getKey(), "/foo/bar").build()); + S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, new DummyS3Service()); + assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added + + metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); + Settings settings = Settings.builder().put(Repositories.BASE_PATH_SETTING.getKey(), "/foo/bar").build(); + s3repo = new S3Repository(metadata, settings, new DummyS3Service()); + assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added + } + + public void testDefaultBufferSize() { + ByteSizeValue defaultBufferSize = S3Repository.Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY); + assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB))); + assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB))); + + ByteSizeValue defaultNodeBufferSize = S3Repository.Repositories.BUFFER_SIZE_SETTING.get(Settings.EMPTY); + assertEquals(defaultBufferSize, 
defaultNodeBufferSize); + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java index bcc430e840c..1c1a3457d7a 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java @@ -21,8 +21,6 @@ package org.elasticsearch.repositories.s3; import org.elasticsearch.common.settings.Settings; -/** - */ public class S3SnapshotRestoreOverHttpTests extends AbstractS3SnapshotRestoreTest { @Override public Settings nodeSettings(int nodeOrdinal) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java index 8bb53edce54..b888d015836 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java @@ -21,8 +21,6 @@ package org.elasticsearch.repositories.s3; import org.elasticsearch.common.settings.Settings; -/** - */ public class S3SnapshotRestoreOverHttpsTests extends AbstractS3SnapshotRestoreTest { @Override public Settings nodeSettings(int nodeOrdinal) { diff --git a/plugins/store-smb/src/test/java/org/elasticsearch/index/store/StoreSmbClientYamlTestSuiteIT.java b/plugins/store-smb/src/test/java/org/elasticsearch/index/store/StoreSmbClientYamlTestSuiteIT.java index 72394b79693..0b9de745cac 100644 --- a/plugins/store-smb/src/test/java/org/elasticsearch/index/store/StoreSmbClientYamlTestSuiteIT.java +++ b/plugins/store-smb/src/test/java/org/elasticsearch/index/store/StoreSmbClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class StoreSmbClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle index 6e1beef51c7..7cb7d7a1a2a 100644 --- a/qa/backwards-5.0/build.gradle +++ b/qa/backwards-5.0/build.gradle @@ -19,5 +19,6 @@ integTest { numNodes = 2 numBwcNodes = 1 bwcVersion = "6.0.0-alpha1-SNAPSHOT" // this is the same as the current version until we released the first RC + setting 'logger.org.elasticsearch', 'DEBUG' } } diff --git a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/Backwards50ClientYamlTestSuiteIT.java b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/Backwards50ClientYamlTestSuiteIT.java index e3ab68b3477..af77b216bc4 100644 --- a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/Backwards50ClientYamlTestSuiteIT.java +++ b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/Backwards50ClientYamlTestSuiteIT.java @@ -37,7 +37,7 @@ public class Backwards50ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase @ParametersFactory public static Iterable parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java 
b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 6b930c17111..3bed3350f0f 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -54,9 +54,11 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.StandardCopyOption; import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.GroupPrincipal; import java.nio.file.attribute.PosixFileAttributeView; import java.nio.file.attribute.PosixFileAttributes; import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.UserPrincipal; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -253,7 +255,7 @@ public class InstallPluginCommandTests extends ESTestCase { assertFalse("not a dir", Files.isDirectory(file)); if (isPosix) { PosixFileAttributes attributes = Files.readAttributes(file, PosixFileAttributes.class); - assertEquals(InstallPluginCommand.DIR_AND_EXECUTABLE_PERMS, attributes.permissions()); + assertEquals(InstallPluginCommand.BIN_FILES_PERMS, attributes.permissions()); } } } @@ -263,18 +265,33 @@ public class InstallPluginCommandTests extends ESTestCase { assertTrue("config dir exists", Files.exists(configDir)); assertTrue("config is a dir", Files.isDirectory(configDir)); + UserPrincipal user = null; + GroupPrincipal group = null; + if (isPosix) { - Path configRoot = env.configFile(); PosixFileAttributes configAttributes = - Files.getFileAttributeView(configRoot, PosixFileAttributeView.class).readAttributes(); + Files.getFileAttributeView(env.configFile(), PosixFileAttributeView.class).readAttributes(); + user = configAttributes.owner(); + group = configAttributes.group(); + PosixFileAttributes attributes = Files.getFileAttributeView(configDir, PosixFileAttributeView.class).readAttributes(); - assertThat(attributes.owner(), equalTo(configAttributes.owner())); - assertThat(attributes.group(), equalTo(configAttributes.group())); + assertThat(attributes.owner(), equalTo(user)); + assertThat(attributes.group(), equalTo(group)); } try (DirectoryStream stream = Files.newDirectoryStream(configDir)) { for (Path file : stream) { assertFalse("not a dir", Files.isDirectory(file)); + + if (isPosix) { + PosixFileAttributes attributes = Files.readAttributes(file, PosixFileAttributes.class); + if (user != null) { + assertThat(attributes.owner(), equalTo(user)); + } + if (group != null) { + assertThat(attributes.group(), equalTo(group)); + } + } } } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index ddac8f66209..1422280165c 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -59,7 +59,7 @@ public class ListPluginsCommandTests extends ESTestCase { static MockTerminal listPlugins(Path home) throws Exception { return listPlugins(home, new String[0]); } - + static MockTerminal listPlugins(Path home, String[] args) throws Exception { String[] argsAndHome = new String[args.length + 1]; System.arraycopy(args, 0, argsAndHome, 0, args.length); @@ -69,16 +69,16 @@ public class ListPluginsCommandTests extends ESTestCase { assertEquals(ExitCodes.OK, status); return terminal; } - + static String 
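The strengthened `InstallPluginCommandTests` assertions above read the owner and group of the config root once and then require every installed file to match them. The JDK calls involved, in isolation (the paths are illustrative; the attribute APIs are standard `java.nio.file`):

-------------------------------------
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.GroupPrincipal;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.UserPrincipal;

Path configRoot = Paths.get("/etc/elasticsearch"); // illustrative path
PosixFileAttributes rootAttrs =
        Files.getFileAttributeView(configRoot, PosixFileAttributeView.class).readAttributes();
UserPrincipal user = rootAttrs.owner();
GroupPrincipal group = rootAttrs.group();

// any file created under configRoot should keep the same ownership
Path file = configRoot.resolve("example.yml"); // illustrative file
PosixFileAttributes attrs = Files.readAttributes(file, PosixFileAttributes.class);
assert attrs.owner().equals(user) && attrs.group().equals(group);
-------------------------------------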
buildMultiline(String... args){ return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n")); } - - static void buildFakePlugin(Environment env, String description, String name, String classname, String version) throws IOException { + + static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException { PluginTestUtil.writeProperties(env.pluginsFile().resolve(name), "description", description, "name", name, - "version", version, + "version", "1.0", "elasticsearch.version", Version.CURRENT.toString(), "java.version", System.getProperty("java.specification.version"), "classname", classname); @@ -97,51 +97,51 @@ public class ListPluginsCommandTests extends ESTestCase { } public void testOnePlugin() throws Exception { - buildFakePlugin(env, "fake desc", "fake", "org.fake", "1.0.0"); + buildFakePlugin(env, "fake desc", "fake", "org.fake"); MockTerminal terminal = listPlugins(home); - assertEquals(terminal.getOutput(), buildMultiline("fake@1.0.0")); + assertEquals(terminal.getOutput(), buildMultiline("fake")); } public void testTwoPlugins() throws Exception { - buildFakePlugin(env, "fake desc", "fake1", "org.fake", "1.2.3"); - buildFakePlugin(env, "fake desc 2", "fake2", "org.fake", "6.5.4"); + buildFakePlugin(env, "fake desc", "fake1", "org.fake"); + buildFakePlugin(env, "fake desc 2", "fake2", "org.fake"); MockTerminal terminal = listPlugins(home); - assertEquals(terminal.getOutput(), buildMultiline("fake1@1.2.3", "fake2@6.5.4")); + assertEquals(terminal.getOutput(), buildMultiline("fake1", "fake2")); } - + public void testPluginWithVerbose() throws Exception { - buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake", "1.0.0"); + buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake"); String[] params = { "-v" }; MockTerminal terminal = listPlugins(home, params); - assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin@1.0.0", - "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0.0", " * Classname: org.fake")); + assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin", + "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0", " * Classname: org.fake")); } - + public void testPluginWithVerboseMultiplePlugins() throws Exception { - buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.2.3"); - buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "6.5.4"); + buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); + buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); String[] params = { "-v" }; MockTerminal terminal = listPlugins(home, params); assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), - "fake_plugin1@1.2.3", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.2.3", - " * Classname: org.fake", "fake_plugin2@6.5.4", "- Plugin information:", "Name: fake_plugin2", - "Description: fake desc 2", "Version: 6.5.4", " * Classname: org.fake2")); + "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", + " * Classname: org.fake", "fake_plugin2", "- Plugin information:", "Name: fake_plugin2", + "Description: fake desc 2", "Version: 1.0", " * Classname: org.fake2")); } - + public void testPluginWithoutVerboseMultiplePlugins() throws Exception { - buildFakePlugin(env, "fake desc 1", 
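`buildMultiline` relies on the three-argument `Collectors.joining(delimiter, prefix, suffix)` overload: an empty prefix and a `"\n"` suffix yield exactly one trailing newline, matching what the terminal emits. In isolation:

-------------------------------------
import java.util.Arrays;
import java.util.stream.Collectors;

String expected = Arrays.asList("fake_plugin1", "fake_plugin2").stream()
        .collect(Collectors.joining("\n", "", "\n"));
// expected == "fake_plugin1\nfake_plugin2\n"
-------------------------------------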
"fake_plugin1", "org.fake", "1.0.0"); - buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "1.0.0"); + buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); + buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home, new String[0]); String output = terminal.getOutput(); - assertEquals(output, buildMultiline("fake_plugin1@1.0.0", "fake_plugin2@1.0.0")); + assertEquals(output, buildMultiline("fake_plugin1", "fake_plugin2")); } - + public void testPluginWithoutDescriptorFile() throws Exception{ Files.createDirectories(env.pluginsFile().resolve("fake1")); NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home)); assertEquals(e.getFile(), env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString()); } - + public void testPluginWithWrongDescriptorFile() throws Exception{ PluginTestUtil.writeProperties(env.pluginsFile().resolve("fake1"), "description", "fake desc"); @@ -149,5 +149,5 @@ public class ListPluginsCommandTests extends ESTestCase { assertEquals(e.getMessage(), "Property [name] is missing in [" + env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString() + "]"); } - + } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index ab4f00492b0..0078c61898d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.plugins; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.Settings; @@ -27,7 +28,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import java.io.BufferedReader; import java.io.IOException; +import java.io.StringReader; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; @@ -43,6 +46,7 @@ public class RemovePluginCommandTests extends ESTestCase { private Path home; private Environment env; + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -130,8 +134,24 @@ public class RemovePluginCommandTests extends ESTestCase { assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir)))); } + public void testRemoveUninstalledPluginErrors() throws Exception { + UserException e = expectThrows(UserException.class, () -> removePlugin("fake", home)); + assertEquals(ExitCodes.CONFIG, e.exitCode); + assertEquals("plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins", e.getMessage()); + + MockTerminal terminal = new MockTerminal(); + new RemovePluginCommand().main(new String[] { "-Epath.home=" + home, "fake" }, terminal); + try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) { + assertEquals("-> Removing fake...", reader.readLine()); + assertEquals("ERROR: plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins", + reader.readLine()); + assertNull(reader.readLine()); + } + } + private String expectedConfigDirPreservedMessage(final Path configDir) { return "-> Preserving plugin config files [" + configDir + "] in 
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 0171dfb99d1..34621802f55 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -26,20 +26,23 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.discovery.MockZenPing; +import org.elasticsearch.transport.MockTcpTransportPlugin; import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; import java.nio.file.Path; -import java.util.Collections; +import java.util.Arrays; +import java.util.List; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.equalTo; @@ -60,26 +63,26 @@ public class TribeUnitTests extends ESTestCase { public static void createTribes() throws NodeValidationException { Settings baseSettings = Settings.builder() .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("transport.type", "local") - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") + .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .build(); - tribe1 = new TribeClientNode( + final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class); + tribe1 = new MockNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node") .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) - .build(), Collections.emptyList()).start(); - tribe2 = new TribeClientNode( + .build(), mockPlugins).start(); + tribe2 = new MockNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node") .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) - .build(), Collections.emptyList()).start(); + .build(), mockPlugins).start(); } @AfterClass @@ -101,13 +104,13 @@ public class TribeUnitTests extends ESTestCase { private static void assertTribeNodeSuccessfullyCreated(Settings extraSettings) throws Exception { //The tribe clients do need it to make sure they can find their corresponding tribes using the proper transport Settings settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).put("node.name", "tribe_node") - .put("transport.type", "local").put("discovery.type", "local") - .put("tribe.t1.transport.type", "local").put("tribe.t2.transport.type", "local") - .put("tribe.t1.discovery.type", "local").put("tribe.t2.discovery.type", "local") + .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).put("discovery.type", "local") + .put("tribe.t1.transport.type", 
MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) + .put("tribe.t2.transport.type",MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put(extraSettings).build(); - try (Node node = new Node(settings).start()) { + try (Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) { try (Client client = node.client()) { assertBusy(() -> { ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState(); diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties index aca53f81c1b..6cfb6a4ec37 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/config/log4j2.properties @@ -1,5 +1,3 @@ -status = error - appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties index 744e554ff91..3f4958adee8 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/deprecation/log4j2.properties @@ -1,5 +1,3 @@ -status = error - appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties index 80a28771620..8553ec5e791 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/find_appender/log4j2.properties @@ -1,5 +1,3 @@ -status = error - appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties index 622f632f4b1..fbd35a4c684 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/hierarchy/log4j2.properties @@ -1,5 +1,3 @@ -status = error - appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout @@ -7,7 +5,6 @@ appender.console.layout.pattern = %m%n rootLogger.level = info rootLogger.appenderRef.console.ref = console -rootLogger.appenderRef.file.ref = file logger.x.name = x logger.x.level = trace diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties index d1a2c534b83..edb143d5fc5 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties +++ 
b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/location_info/log4j2.properties @@ -1,5 +1,3 @@ -status = error - appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 496a02e42dc..f58d618adf3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -42,7 +42,7 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return createParameters(0, 1); + return createParameters(); } } diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index d36c2aa04d2..bc1c95424d8 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -27,13 +27,11 @@ import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.Netty3Plugin; -import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.transport.client.PreBuiltTransportClient; import org.junit.After; import org.junit.AfterClass; @@ -125,7 +123,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { for (String stringAddress : stringAddresses) { URL url = new URL("http://" + stringAddress); InetAddress inetAddress = InetAddress.getByName(url.getHost()); - transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort())); + transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } return startClient(createTempDir(), transportAddresses); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java index cff7b55f9de..8a998a4e535 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java @@ -27,11 +27,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import 
java.util.Collections; import java.util.HashMap; import java.util.List; @@ -80,7 +80,7 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler { @SuppressWarnings("unchecked") // List<String> casts @Override - public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final List<String> settings; try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) { @@ -95,14 +95,15 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler { } } - final XContentBuilder builder = channel.newBuilder(); + return channel -> { + final XContentBuilder builder = channel.newBuilder(); - builder.startObject().startArray("settings"); - for (String setting : settings) { - builder.startObject().field(setting, SETTINGS.get(setting).getRaw(this.settings)).endObject(); - } - builder.endArray().endObject(); - - channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + builder.startObject().startArray("settings"); + for (String setting : settings) { + builder.startObject().field(setting, SETTINGS.get(setting).getRaw(this.settings)).endObject(); + } + builder.endArray().endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + }; } } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java index a321fc0a457..a38fd3d015e 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestResponseHeaderRestAction.java @@ -23,12 +23,13 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; + public class TestResponseHeaderRestAction extends BaseRestHandler { @Inject @@ -38,15 +39,15 @@ public class TestResponseHeaderRestAction extends BaseRestHandler { } @Override - public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { if ("password".equals(request.header("Secret"))) { RestResponse response = new BytesRestResponse(RestStatus.OK, "Access granted"); response.addHeader("Secret", "granted"); - channel.sendResponse(response); + return channel -> channel.sendResponse(response); } else { RestResponse response = new BytesRestResponse(RestStatus.UNAUTHORIZED, "Access denied"); response.addHeader("Secret", "required"); - channel.sendResponse(response); + return channel -> channel.sendResponse(response); } } } diff --git a/qa/smoke-test-ingest-disabled/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestDisabledClientYamlTestSuiteIT.java b/qa/smoke-test-ingest-disabled/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestDisabledClientYamlTestSuiteIT.java index c8d506424c0..beb7499bf7f 100644 --- a/qa/smoke-test-ingest-disabled/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestDisabledClientYamlTestSuiteIT.java 
+++ b/qa/smoke-test-ingest-disabled/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestDisabledClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class SmokeTestIngestDisabledClientYamlTestSuiteIT extends ESClientYamlSu @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestWithAllDepsClientYamlTestSuiteIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestWithAllDepsClientYamlTestSuiteIT.java index b3b84dfc55e..0bd7b9ac029 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestWithAllDepsClientYamlTestSuiteIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/smoketest/SmokeTestIngestWithAllDepsClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class SmokeTestIngestWithAllDepsClientYamlTestSuiteIT extends ESClientYam @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml index 8a80dec1c09..50d85238caa 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yaml @@ -9,7 +9,6 @@ "processors": [ { "script" : { - "lang" : "painless", "inline": "ctx.bytes_total = (ctx.bytes_in + ctx.bytes_out) * params.factor", "params": { "factor": 10 @@ -48,7 +47,6 @@ "processors": [ { "script" : { - "lang" : "painless", "file": "master" } } @@ -94,7 +92,6 @@ "processors": [ { "script" : { - "lang" : "painless", "id" : "sum_bytes" } } diff --git a/qa/smoke-test-multinode/src/test/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java b/qa/smoke-test-multinode/src/test/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java index 456387e6c19..1fe8cfeb9d5 100644 --- a/qa/smoke-test-multinode/src/test/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java +++ b/qa/smoke-test-multinode/src/test/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class SmokeTestMultiNodeClientYamlTestSuiteIT extends ESClientYamlSuiteTe @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java b/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java index 05021f3c2bb..0f5636095e9 100644 --- a/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java +++ 
b/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java @@ -36,7 +36,7 @@ public class SmokeTestPluginsClientYamlTestSuiteIT extends ESClientYamlSuiteTest @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/qa/smoke-test-reindex-with-painless/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.java b/qa/smoke-test-reindex-with-painless/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.java index db01ce2dfe9..fb4f27210d8 100644 --- a/qa/smoke-test-reindex-with-painless/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.java +++ b/qa/smoke-test-reindex-with-painless/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.java @@ -35,6 +35,6 @@ public class SmokeTestReindexWithPainlessClientYamlTestSuiteIT extends ESClientY @ParametersFactory public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException { - return ESClientYamlSuiteTestCase.createParameters(0, 1); + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index fb7ee1d8d3c..7bd9861a9d1 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -37,7 +37,6 @@ List<String> availableBoxes = [ 'sles-12', 'ubuntu-1204', 'ubuntu-1404', - 'ubuntu-1504', 'ubuntu-1604' ] @@ -91,7 +90,14 @@ configurations { } repositories { - mavenCentral() + mavenCentral() // Try maven central first, it'll have releases before 5.0.0 + /* Set up a repository that tries to download from + https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext] + which should work for 5.0.0+. 
This isn't a real ivy repository but gradle + is fine with that */ + ivy { + artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]" + } } dependencies { @@ -126,10 +132,7 @@ Set<String> getVersions() { new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s -> xml = new XmlParser().parse(s) } - - // List all N-1 releases from maven central - int major = Integer.parseInt(project.version.substring(0, project.version.indexOf('.'))) - 1 - Set<String> versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /$major\.\d\.\d/ }) + Set<String> versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /[5]\.\d\.\d/ }) if (versions.isEmpty() == false) { return versions; } @@ -152,9 +155,10 @@ task verifyPackagingTestUpgradeFromVersions { String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null) if (maybeUpdateFromVersions == null) { Set<String> versions = getVersions() - Set<String> actualVersions = new HashSet<>(Arrays.asList(upgradeFromVersions)) + Set<String> actualVersions = new TreeSet<>(Arrays.asList(upgradeFromVersions)) if (!versions.equals(actualVersions)) { - throw new GradleException("out-of-date versions [" + actualVersions + "], expected [" + versions + "]; run gradle updatePackagingTestUpgradeFromVersions") + throw new GradleException("out-of-date versions " + actualVersions + + ", expected " + versions + "; run gradle updatePackagingTestUpgradeFromVersions") } } } @@ -204,9 +208,26 @@ task checkVagrantVersion(type: Exec) { standardOutput = new ByteArrayOutputStream() doLast { String version = standardOutput.toString().trim() - if ((version ==~ /Vagrant 1\.[789]\..+/) == false) { - throw new InvalidUserDataException( - "Illegal version of vagrant [${version}]. Need [Vagrant 1.7+]") + if ((version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) == false) { + throw new InvalidUserDataException("Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]") + } + } +} + +task checkVirtualBoxVersion(type: Exec) { + commandLine 'vboxmanage', '--version' + standardOutput = new ByteArrayOutputStream() + doLast { + String version = standardOutput.toString().trim() + try { + String[] versions = version.split('\\.') + int major = Integer.parseInt(versions[0]) + int minor = Integer.parseInt(versions[1]) + if ((major < 5) || (major == 5 && minor < 1)) { + throw new InvalidUserDataException("Illegal version of virtualbox [${version}]. Need [5.1+]") + } + } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) { + throw new InvalidUserDataException("Unable to parse version of virtualbox [${version}]. Required [5.1+]", e) } } } @@ -243,12 +264,12 @@ for (String box : availableBoxes) { Task update = tasks.create("vagrant${boxTask}#update", VagrantCommandTask) { boxName box args 'box', 'update', box - dependsOn checkVagrantVersion + dependsOn checkVagrantVersion, checkVirtualBoxVersion } Task up = tasks.create("vagrant${boxTask}#up", VagrantCommandTask) { boxName box - /* Its important that we try to reprovision the box even if it already + /* It's important that we try to reprovision the box even if it already exists. That way updates to the vagrant configuration take effect automatically. That isn't to say that the updates will always be compatible. It's ok to just destroy the boxes if they get busted but that is a manual step
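The getVersions() helper above scrapes maven central's metadata for 5.x.x releases. A rough shell equivalent for eyeballing the same list (the grep pattern mirrors the Groovy /[5]\.\d\.\d/ regex, so like the build it assumes single-digit version components):

    curl -s https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml \
        | grep -oE '<version>5\.[0-9]\.[0-9]</version>' \
        | sed 's/<[^>]*>//g' \
        | sort -u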
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats index 7f9ce21e85d..9712febc760 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats @@ -73,6 +73,13 @@ setup() { verify_archive_installation } +@test "[TAR] verify elasticsearch-plugin list runs without any plugins installed" { + # previously this would fail because the archive installations did + # not create an empty plugins directory + local plugins_list=`$ESHOME/bin/elasticsearch-plugin list` + [[ -z $plugins_list ]] +} + @test "[TAR] elasticsearch fails if java executable is not found" { local JAVA=$(which java) @@ -103,12 +110,12 @@ setup() { local temp=`mktemp -d` touch "$temp/jvm.options" chown -R elasticsearch:elasticsearch "$temp" - echo "-Xms264m" >> "$temp/jvm.options" - echo "-Xmx264m" >> "$temp/jvm.options" + echo "-Xms512m" >> "$temp/jvm.options" + echo "-Xmx512m" >> "$temp/jvm.options" export ES_JVM_OPTIONS="$temp/jvm.options" export ES_JAVA_OPTS="-XX:-UseCompressedOops" start_elasticsearch_service - curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":276824064' + curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912' curl -s -XGET localhost:9200/_nodes | fgrep '"using_compressed_ordinary_object_pointers":"false"' stop_elasticsearch_service export ES_JVM_OPTIONS=$es_jvm_options diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats index 52f3de34a97..d435a76b9c7 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats @@ -74,6 +74,11 @@ setup() { verify_package_installation } +@test "[DEB] verify elasticsearch-plugin list runs without any plugins installed" { + local plugins_list=`$ESHOME/bin/elasticsearch-plugin list` + [[ -z $plugins_list ]] +} + @test "[DEB] elasticsearch isn't started by package install" { # Wait a second to give Elasticsearch a chance to start if it is going to. # This isn't perfect by any means but it's something. @@ -94,6 +99,11 @@ setup() { run_elasticsearch_tests } +@test "[DEB] verify package installation after start" { + # Checks that the startup scripts didn't change the permissions + verify_package_installation +} + ################################## # Uninstall DEB package ################################## diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats index 50c6849e92e..b6ec78509d1 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -73,6 +73,11 @@ setup() { verify_package_installation } +@test "[RPM] verify elasticsearch-plugin list runs without any plugins installed" { + local plugins_list=`$ESHOME/bin/elasticsearch-plugin list` + [[ -z $plugins_list ]] +} + @test "[RPM] elasticsearch isn't started by package install" { # Wait a second to give Elasticsearch a chance to start if it is going to. # This isn't perfect by any means but it's something. 
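The [TAR] jvm.options test above greps the nodes API for heap_init_in_bytes 536870912; that constant is just -Xms512m expressed in bytes, which is easy to sanity-check from a shell against a running install:

    echo $((512 * 1024 * 1024))    # prints 536870912
    curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912'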
@@ -89,6 +94,11 @@ setup() { run_elasticsearch_tests } +@test "[RPM] verify package installation after start" { + # Checks that the startup scripts didn't change the permissions + verify_package_installation +} + @test "[RPM] remove package" { # User installed scripts aren't removed so we'll just get them ourselves rm -rf $ESSCRIPTS @@ -145,6 +155,10 @@ setup() { rpm -qe 'elasticsearch' } +@test "[RPM] verify package reinstallation" { + verify_package_installation +} + @test "[RPM] reremove package" { echo "# ping" >> "/etc/elasticsearch/elasticsearch.yml" echo "# ping" >> "/etc/elasticsearch/jvm.options" diff --git a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats index 857a0ba3978..de1416059dd 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats @@ -73,6 +73,26 @@ setup() { run date +%s epoch="$output" + # The OpenJDK packaged for CentOS and OEL both override the default value (false) for the JVM option "AssumeMP". + # + # Because it is forced to "true" by default for these packages, the following warning message is printed to the + # standard output when the Vagrant box has only 1 CPU: + # OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure + # the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N + # + # This message will then fail the next test where we check if no entries have been added to the journal. + # + # This message has appeared since java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64 because of the commit: + # 2016-10-10 - Andrew Hughes - 1:1.8.0.111-1.b15 - Turn debug builds on for all JIT architectures. + # Always AssumeMP on RHEL. 
+ # - Resolves: rhbz#1381990 + # + # Here we set the "-XX:-AssumeMP" option to false again: + lsb_release=$(lsb_release -i) + if [[ "$lsb_release" =~ "CentOS" ]] || [[ "$lsb_release" =~ "OracleServer" ]]; then + echo "-XX:-AssumeMP" >> $ESCONFIG/jvm.options + fi + systemctl start elasticsearch.service wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" @@ -85,7 +105,11 @@ setup() { # Verifies that no new entries in journald have been added # since the last start result="$(journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" --output cat | wc -l)" - [ "$result" -eq "0" ] + [ "$result" -eq "0" ] || { + echo "Expected no entries in journalctl for the Elasticsearch service but found:" + journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" + false + } } @test "[SYSTEMD] start (running)" { diff --git a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats index fa96882f914..237c8956c40 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats @@ -134,6 +134,26 @@ setup() { [ "$status" -eq 3 ] || [ "$status" -eq 4 ] } +@test "[INIT.D] start Elasticsearch with custom JVM options" { + assert_file_exist $ESENVFILE + local es_java_opts=$ES_JAVA_OPTS + local es_jvm_options=$ES_JVM_OPTIONS + local temp=`mktemp -d` + touch "$temp/jvm.options" + chown -R elasticsearch:elasticsearch "$temp" + echo "-Xms512m" >> "$temp/jvm.options" + echo "-Xmx512m" >> "$temp/jvm.options" + cp $ESENVFILE "$temp/elasticsearch" + echo "ES_JVM_OPTIONS=\"$temp/jvm.options\"" >> $ESENVFILE + echo "ES_JAVA_OPTS=\"-XX:-UseCompressedOops\"" >> $ESENVFILE + service elasticsearch start + wait_for_elasticsearch_status + curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912' + curl -s -XGET localhost:9200/_nodes | fgrep '"using_compressed_ordinary_object_pointers":"false"' + service elasticsearch stop + cp "$temp/elasticsearch" $ESENVFILE +} + # Simulates the behavior of a system restart: # the PID directory is deleted by the operating system # but it should not block ES from starting diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index b5a494fd084..b979f40e309 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -205,6 +205,10 @@ fi install_and_check_plugin analysis stempel } +@test "[$GROUP] install ukrainian plugin" { + install_and_check_plugin analysis ukrainian morfologik-fsa-*.jar morfologik-stemming-*.jar +} + @test "[$GROUP] install gce plugin" { install_and_check_plugin discovery gce google-api-client-*.jar } @@ -222,10 +226,10 @@ fi } @test "[$GROUP] install ingest-attachment plugin" { - # we specify the version on the poi-3.15-beta1.jar so that the test does + # we specify the version on the poi-3.15.jar so that the test does # not spuriously pass if the jar is missing but the other poi jars # are present - install_and_check_plugin ingest attachment bcprov-jdk15on-*.jar tika-core-*.jar pdfbox-*.jar poi-3.15-beta1.jar poi-ooxml-3.15-beta1.jar poi-ooxml-schemas-*.jar poi-scratchpad-*.jar + install_and_check_plugin ingest attachment bcprov-jdk15on-*.jar tika-core-*.jar pdfbox-*.jar poi-3.15.jar 
poi-ooxml-3.15.jar poi-ooxml-schemas-*.jar poi-scratchpad-*.jar } @test "[$GROUP] install ingest-geoip plugin" { @@ -341,6 +345,10 @@ fi remove_plugin analysis-stempel } +@test "[$GROUP] remove ukrainian plugin" { + remove_plugin analysis-ukrainian +} + @test "[$GROUP] remove gce plugin" { remove_plugin discovery-gce } @@ -428,38 +436,21 @@ fi sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output # exclude progress line local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) - if [ "$GROUP" == "TAR PLUGINS" ]; then - # tar extraction does not create the plugins directory so the plugin tool will print an additional line that the directory will be created - [ "$loglines" -eq "3" ] || { - echo "Expected 3 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - else - [ "$loglines" -eq "2" ] || { - echo "Expected 2 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - fi + [ "$loglines" -eq "2" ] || { + echo "Expected 2 lines excluding progress bar but the output had $loglines lines and was:" + cat /tmp/plugin-cli-output + false + } remove_jvm_example local relativePath=${1:-$(readlink -m jvm-example-*.zip)} sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) - if [ "$GROUP" == "TAR PLUGINS" ]; then - [ "$loglines" -gt "3" ] || { - echo "Expected more than 3 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - else - [ "$loglines" -gt "2" ] || { - echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - fi + [ "$loglines" -gt "2" ] || { + echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" + cat /tmp/plugin-cli-output + false + } remove_jvm_example } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash index 1060aa78849..db274b64985 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash @@ -36,6 +36,12 @@ export_elasticsearch_paths() { export ESDATA="/var/lib/elasticsearch" export ESLOG="/var/log/elasticsearch" export ESPIDDIR="/var/run/elasticsearch" + if is_dpkg; then + export ESENVFILE="/etc/default/elasticsearch" + fi + if is_rpm; then + export ESENVFILE="/etc/sysconfig/elasticsearch" + fi } # Install the rpm or deb package. 
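The ESENVFILE selection above (dpkg vs. rpm) and the assert_file changes in the hunk that follows reduce to concrete ownership and mode bits. A minimal spot-check of the tightened permissions on a dpkg-based box, assuming GNU coreutils stat (present on every distribution this suite provisions):

    stat -c '%U %G %a' /etc/elasticsearch/elasticsearch.yml    # expect: root elasticsearch 660
    stat -c '%U %G %a' /etc/default/elasticsearch              # expect: root root 660
    stat -c '%U %G %a' /var/lib/elasticsearch                  # expect: elasticsearch elasticsearch 750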
@@ -80,13 +86,17 @@ verify_package_installation() { assert_file "$ESHOME" d root root 755 assert_file "$ESHOME/bin" d root root 755 + assert_file "$ESHOME/bin/elasticsearch" f root root 755 + assert_file "$ESHOME/bin/elasticsearch-plugin" f root root 755 + assert_file "$ESHOME/bin/elasticsearch-translog" f root root 755 assert_file "$ESHOME/lib" d root root 755 assert_file "$ESCONFIG" d root elasticsearch 750 - assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 750 - assert_file "$ESCONFIG/log4j2.properties" f root elasticsearch 750 + assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 660 + assert_file "$ESCONFIG/jvm.options" f root elasticsearch 660 + assert_file "$ESCONFIG/log4j2.properties" f root elasticsearch 660 assert_file "$ESSCRIPTS" d root elasticsearch 750 - assert_file "$ESDATA" d elasticsearch elasticsearch 755 - assert_file "$ESLOG" d elasticsearch elasticsearch 755 + assert_file "$ESDATA" d elasticsearch elasticsearch 750 + assert_file "$ESLOG" d elasticsearch elasticsearch 750 assert_file "$ESPLUGINS" d root root 755 assert_file "$ESMODULES" d root root 755 assert_file "$ESPIDDIR" d elasticsearch elasticsearch 755 @@ -95,7 +105,7 @@ verify_package_installation() { if is_dpkg; then # Env file - assert_file "/etc/default/elasticsearch" f root root 644 + assert_file "/etc/default/elasticsearch" f root root 660 # Doc files assert_file "/usr/share/doc/elasticsearch" d root root 755 @@ -104,7 +114,7 @@ verify_package_installation() { if is_rpm; then # Env file - assert_file "/etc/sysconfig/elasticsearch" f root root 644 + assert_file "/etc/sysconfig/elasticsearch" f root root 660 # License file assert_file "/usr/share/elasticsearch/LICENSE.txt" f root root 644 fi @@ -114,4 +124,15 @@ verify_package_installation() { assert_file "/usr/lib/tmpfiles.d/elasticsearch.conf" f root root 644 assert_file "/usr/lib/sysctl.d/elasticsearch.conf" f root root 644 fi + + if is_sysvinit; then + assert_file "/etc/init.d/elasticsearch" f root root 750 + fi + + run sudo -E -u vagrant LANG="en_US.UTF-8" cat "$ESCONFIG/elasticsearch.yml" + [ $status = 1 ] + [[ "$output" == *"Permission denied"* ]] || { + echo "Expected permission denied but found $output:" + false + } } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash index afae7439057..55e7fdfc484 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash @@ -83,7 +83,6 @@ install_jvm_example() { #just make sure that everything is the same as the parent bin dir, which was properly set up during install bin_user=$(find "$ESHOME/bin" -maxdepth 0 -printf "%u") bin_owner=$(find "$ESHOME/bin" -maxdepth 0 -printf "%g") - bin_privileges=$(find "$ESHOME/bin" -maxdepth 0 -printf "%m") assert_file "$ESHOME/bin/jvm-example" d $bin_user $bin_owner 755 assert_file "$ESHOME/bin/jvm-example/test" f $bin_user $bin_owner 755 @@ -92,8 +91,15 @@ install_jvm_example() { config_user=$(find "$ESCONFIG" -maxdepth 0 -printf "%u") config_owner=$(find "$ESCONFIG" -maxdepth 0 -printf "%g") # directories should use the user file-creation mask - assert_file "$ESCONFIG/jvm-example" d $config_user $config_owner 755 - assert_file "$ESCONFIG/jvm-example/example.yaml" f $config_user $config_owner 644 + assert_file "$ESCONFIG/jvm-example" d $config_user $config_owner 750 + assert_file "$ESCONFIG/jvm-example/example.yaml" f $config_user $config_owner 660 + + run sudo -E -u vagrant 
LANG="en_US.UTF-8" cat "$ESCONFIG/jvm-example/example.yaml" + [ $status = 1 ] + [[ "$output" == *"Permission denied"* ]] || { + echo "Expected permission denied but found $output:" + false + } echo "Running jvm-example's bin script...." "$ESHOME/bin/jvm-example/test" | grep test @@ -123,20 +129,28 @@ install_and_check_plugin() { shift if [ "$prefix" == "-" ]; then - local fullName="$name" + local full_name="$name" else - local fullName="$prefix-$name" + local full_name="$prefix-$name" fi - install_jvm_plugin $fullName "$(readlink -m $fullName-*.zip)" + install_jvm_plugin $full_name "$(readlink -m $full_name-*.zip)" - assert_module_or_plugin_directory "$ESPLUGINS/$fullName" + assert_module_or_plugin_directory "$ESPLUGINS/$full_name" + # analysis plugins have a corresponding analyzers jar if [ $prefix == 'analysis' ]; then - assert_module_or_plugin_file "$ESPLUGINS/$fullName/lucene-analyzers-$name-*.jar" + local analyzer_jar_suffix=$name + # the name of the analyzer jar for the ukrainian plugin does + # not match the name of the plugin, so we have to make an + # exception + if [ $name == 'ukrainian' ]; then + analyzer_jar_suffix='morfologik' + fi + assert_module_or_plugin_file "$ESPLUGINS/$full_name/lucene-analyzers-$analyzer_jar_suffix-*.jar" fi for file in "$@"; do - assert_module_or_plugin_file "$ESPLUGINS/$fullName/$file" + assert_module_or_plugin_file "$ESPLUGINS/$full_name/$file" done } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash index 0ea86ddcc6e..b5edebaf41c 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash @@ -79,16 +79,19 @@ export_elasticsearch_paths() { # Checks that all directories & files are correctly installed # after a archive (tar.gz/zip) install verify_archive_installation() { - assert_file "$ESHOME" d - assert_file "$ESHOME/bin" d - assert_file "$ESHOME/bin/elasticsearch" f - assert_file "$ESHOME/bin/elasticsearch.in.sh" f - assert_file "$ESHOME/bin/elasticsearch-plugin" f - assert_file "$ESCONFIG" d - assert_file "$ESCONFIG/elasticsearch.yml" f - assert_file "$ESCONFIG/log4j2.properties" f - assert_file "$ESHOME/lib" d - assert_file "$ESHOME/NOTICE.txt" f - assert_file "$ESHOME/LICENSE.txt" f - assert_file "$ESHOME/README.textile" f + assert_file "$ESHOME" d elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin" d elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin/elasticsearch" f elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin/elasticsearch.in.sh" f elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin/elasticsearch-plugin" f elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin/elasticsearch-translog" f elasticsearch elasticsearch 755 + assert_file "$ESCONFIG" d elasticsearch elasticsearch 755 + assert_file "$ESCONFIG/elasticsearch.yml" f elasticsearch elasticsearch 660 + assert_file "$ESCONFIG/jvm.options" f elasticsearch elasticsearch 660 + assert_file "$ESCONFIG/log4j2.properties" f elasticsearch elasticsearch 660 + assert_file "$ESPLUGINS" d elasticsearch elasticsearch 755 + assert_file "$ESHOME/lib" d elasticsearch elasticsearch 755 + assert_file "$ESHOME/NOTICE.txt" f elasticsearch elasticsearch 644 + assert_file "$ESHOME/LICENSE.txt" f elasticsearch elasticsearch 644 + assert_file "$ESHOME/README.textile" f elasticsearch elasticsearch 644 } diff --git a/qa/vagrant/versions b/qa/vagrant/versions index 654a95a3a25..0062ac97180 100644 --- a/qa/vagrant/versions +++ 
b/qa/vagrant/versions @@ -1 +1 @@ -6.0.0-alpha1-SNAPSHOT +5.0.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index 4bc93460b75..d30c6ace6f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -33,6 +33,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json index 6799a67bae8..7c826890607 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json @@ -38,6 +38,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json index 73803bac867..4311d9a3be1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json @@ -33,6 +33,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json index b9776ab5881..88c7eee1262 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json @@ -38,6 +38,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json index 349bf37aa06..e858e83b465 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json @@ -29,6 +29,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "ts": { "type": "boolean", "description": "Set to false to disable timestamping", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json index f21e6485d58..e893fb1b044 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json @@ -12,6 +12,10 @@ "type": "boolean", "description": "Return help information", "default": false + }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json index 03b67fd14c2..42a47253de7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json @@ -49,6 +49,10 @@ "description": "Set to true to return stats only for primary shards", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json index bdf474c7a02..ab87b2adb7e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json @@ -29,6 +29,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json index 4e44eb820f1..3d8a4a77a77 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json @@ -29,6 +29,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json index f0fcf390102..1b3c1266a63 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json @@ -12,6 +12,10 @@ "type" : "string", "description" : "a short version of the Accept header, e.g. 
json, yaml" }, + "full_id": { + "type" : "boolean", + "description" : "Return the full node ID instead of the shortened version (default: false)" + }, "local": { "type" : "boolean", "description" : "Return local information, do not retrieve the state from master node (default: false)" @@ -29,6 +33,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json index 74fc54b9604..983b82482ae 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json @@ -29,6 +29,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json index 5822f64c8e2..93c7feababa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json @@ -27,6 +27,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json index b220a5eda74..42f91cedfdd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json @@ -34,6 +34,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json index ace0a1ea3c9..c640a568fde 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json @@ -30,6 +30,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json index eed4a627736..118f8b6bf96 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json @@ -25,6 +25,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json index a9a1a30770f..db46ce909ff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json @@ -33,6 +33,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json index 68468dd542a..90f4ca32730 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json @@ -3,8 +3,10 @@ "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html", "methods": ["GET"], "url": { - "path": "/_cat/snapshots/{repository}", - "paths": ["/_cat/snapshots/{repository}"], + "path": "/_cat/snapshots", + "paths": [ + "/_cat/snapshots", + "/_cat/snapshots/{repository}"], "parts": { "repository": { "type" : "list", @@ -35,6 +37,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json index 73f1281ff5f..d7b7acd5a4f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json @@ -41,6 +41,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json index f8aaa72723a..f0757c2d652 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json @@ -33,6 +33,10 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. 
Display column headers", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json index d6ba5ebc6b1..70a11000bbe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json @@ -4,8 +4,12 @@ "methods": ["GET"], "url": { "path": "/_cat/thread_pool", - "paths": ["/_cat/thread_pool","/_cat/thread_pool/{thread_pools}"], + "paths": ["/_cat/thread_pool","/_cat/thread_pool/{thread_pool_patterns}"], "parts": { + "thread_pool_patterns": { + "type": "list", + "description": "A comma-separated list of regular-expressions to filter the thread pools in the output" + } }, "params": { "format": { @@ -34,14 +38,14 @@ "description": "Return help information", "default": false }, + "s": { + "type": "list", + "description" : "Comma-separated list of column names or column aliases to sort by" + }, "v": { "type": "boolean", "description": "Verbose mode. Display column headers", "default": false - }, - "thread_pool_patterns": { - "type": "list", - "description": "A comma-separated list of regular-expressions to filter the thread pools in the output" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json index f1a6a98217c..06828a6588a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json @@ -41,7 +41,7 @@ "wait_for_events": { "type" : "enum", "options" : ["immediate", "urgent", "high", "normal", "low", "languid"], - "description" : "Wait until all currently queued events with the given priorty are processed" + "description" : "Wait until all currently queued events with the given priority are processed" }, "wait_for_no_relocating_shards": { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index 9048f982712..0e2697cd524 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -67,10 +67,6 @@ "lenient": { "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" - }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json new file mode 100644 index 00000000000..31c9d51f673 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -0,0 +1,75 @@ +{ + "create": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", + "methods": ["PUT","POST"], + "url": { + "path": "/{index}/{type}/{id}/_create", + "paths": ["/{index}/{type}/{id}/_create"], + "parts": { + "id": { + "type" : "string", + "required" : true, + "description" : "Document ID" + }, + "index": { + "type" : "string", + "required" : true, + "description" : "The name of the index" + }, + "type": { + "type" : "string", + "required" : true, + "description" : "The type of the document" + } + }, + "params": { + "wait_for_active_shards": { + "type" : "string", + "description" : "Sets the 
number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" + }, + "parent": { + "type" : "string", + "description" : "ID of the parent document" + }, + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." + }, + "routing": { + "type" : "string", + "description" : "Specific routing value" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + }, + "timestamp": { + "type" : "time", + "description" : "Explicit timestamp for the document" + }, + "ttl": { + "type" : "time", + "description" : "Expiration time for the document" + }, + "version" : { + "type" : "number", + "description" : "Explicit version number for concurrency control" + }, + "version_type": { + "type" : "enum", + "options" : ["internal", "external", "external_gte", "force"], + "description" : "Specific version type" + }, + "pipeline" : { + "type" : "string", + "description" : "The pipeline id to preprocess incoming documents with" + } + } + }, + "body": { + "description" : "The document", + "required" : true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index a734f7b1bac..ac84a3cc573 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -36,18 +36,6 @@ "type" : "string", "description" : "The field to use as default where no field prefix is given in the query string" }, - "explain": { - "type" : "boolean", - "description" : "Specify whether to return detailed information about score computation as part of a hit" - }, - "stored_fields": { - "type" : "list", - "description" : "A comma-separated list of stored fields to return as part of a hit" - }, - "docvalue_fields": { - "type" : "list", - "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" - }, "from": { "type" : "number", "description" : "Starting offset (default: 0)" @@ -77,10 +65,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" @@ -94,7 +78,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { @@ -134,32 +118,6 @@ "type" : "list", "description" : "Specific 'tag' of the request for logging and statistical purposes" }, - "suggest_field": { - "type" : "string", - "description" : "Specify which field to use for suggestions" - }, - "suggest_mode": { - "type" : "enum", - "options" : ["missing", 
"popular", "always"], - "default" : "missing", - "description" : "Specify suggest mode" - }, - "suggest_size": { - "type" : "number", - "description" : "How many suggestions to return in response" - }, - "suggest_text": { - "type" : "text", - "description" : "The source text for which the suggestions should be returned" - }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - }, - "track_scores": { - "type" : "boolean", - "description": "Whether to calculate and return scores even if they are not used for sorting" - }, "version": { "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" @@ -182,7 +140,7 @@ "description" : "Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, "scroll_size": { - "type": "integer", + "type": "number", "defaut_value": 100, "description": "Size on the scroll request powering the update_by_query" }, @@ -192,9 +150,14 @@ "description" : "Should the request should block until the delete-by-query is complete." }, "requests_per_second": { - "type": "float", + "type": "number", "default": 0, - "description": "The throttle for this request in sub-requests per second. -1 means set no throttle." + "description": "The throttle for this request in sub-requests per second. -1 means no throttle." + }, + "slices": { + "type": "integer", + "default": 1, + "description": "The number of slices this task should be divided into. Defaults to 1 meaning the task isn't sliced into subtasks." } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 328794ffdd0..0f0d8c132b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -49,10 +49,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "parent": { "type" : "string", "description" : "The ID of the parent document" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 677219addee..814a53c1141 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -54,7 +54,7 @@ "description" : "Explicit timestamp for the document" }, "ttl": { - "type" : "duration", + "type" : "time", "description" : "Expiration time for the document" }, "version" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json index 881382ffa00..93965388916 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json @@ -12,22 +12,6 @@ } }, "params": { - "analyzer": { - "type" : "string", - "description" : "The name of the analyzer to use" - }, - "char_filter": { - "type" : "list", - "description" : "A comma-separated list of character filters 
to use for the analysis" - }, - "field": { - "type" : "string", - "description" : "Use the analyzer configured for this field (instead of passing the analyzer name)" - }, - "filter": { - "type" : "list", - "description" : "A comma-separated list of filters to use for the analysis" - }, "index": { "type" : "string", "description" : "The name of the index to scope the operation" @@ -36,22 +20,6 @@ "type" : "boolean", "description" : "With `true`, specify that a local shard should be used if available, with `false`, use a random shard (default: true)" }, - "text": { - "type" : "list", - "description" : "The text on which the analysis should be performed (when request body is not used)" - }, - "tokenizer": { - "type" : "string", - "description" : "The name of the tokenizer to use for the analysis" - }, - "explain": { - "type" : "boolean", - "description" : "With `true`, outputs more advanced details. (default: false)" - }, - "attributes": { - "type" : "list", - "description" : "A comma-separated list of token attributes to output, this parameter works only with `explain=true`" - }, "format": { "type": "enum", "options" : ["detailed","text"], @@ -61,7 +29,7 @@ } }, "body": { - "description" : "The text on which the analysis should be performed" + "description" : "Define analyzer/tokenizer parameters and the text on which the analysis should be performed" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json index ebfd5cc3a73..4ab053cd118 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json @@ -13,14 +13,6 @@ } }, "params": { - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, "expand_wildcards": { "type" : "enum", "options" : ["open","closed","none","all"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 97580182ea1..5e5ba1367ad 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -22,6 +22,10 @@ "type" : "time", "description" : "Explicit operation timeout" }, + "dry_run": { + "type" : "boolean", + "description" : "If set to true the rollover action will only be validated but not actually performed even if a condition matches. The default is false" + }, "master_timeout": { "type" : "time", "description" : "Specify timeout for connection to master" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json index 66111456719..1e2413ee723 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json @@ -13,8 +13,8 @@ }, "params": { "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" }, "expand_wildcards": { "type" : "enum", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json index 98af689833a..7a0977da194 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json @@ -63,10 +63,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "rewrite": { "type": "boolean", "description": "Provide a more detailed explanation showing the actual Lucene query that will be executed." diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json index 1f1f5adf75e..46f595b2186 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json @@ -32,6 +32,10 @@ "type" : "boolean", "description" : "Refresh the shard containing the document before performing the operation" }, + "routing": { + "type" : "string", + "description" : "Specific routing value" + }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index fb9ef094f0b..665d6bd7a2c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -52,8 +52,8 @@ }, "level": { "type" : "enum", - "description": "Return indices stats aggregated at node, index or shard level", - "options" : ["node", "indices", "shards"], + "description": "Return indices stats aggregated at index, node or shard level", + "options" : ["indices", "node", "shards"], "default" : "node" }, "types" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json index 5fb4fe58db3..68937670a79 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json @@ -26,9 +26,14 @@ "description" : "Should the request should block until the reindex is complete." }, "requests_per_second": { - "type": "float", + "type": "number", "default": 0, - "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts." + "description": "The throttle to set on this request in sub-requests per second. -1 means no throttle." + }, + "slices": { + "type": "integer", + "default": 1, + "description": "The number of slices this task should be divided into. Defaults to 1 meaning the task isn't sliced into subtasks." 
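As a rough sketch of how the `slices` and `requests_per_second` parameters specced here combine on a reindex call (the `src` and `dest` index names are made up, and a local 5.1+ node is assumed):

--------------------------------------------------
# split the reindex into 2 parallel subtasks, throttling the whole
# operation to 500 sub-requests per second; -1 disables the throttle
curl -XPOST 'localhost:9200/_reindex?slices=2&requests_per_second=500' -d '{
  "source": { "index": "src" },
  "dest": { "index": "dest" }
}'
--------------------------------------------------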
} } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json index 5be7ea27407..4bba41d37d5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json @@ -13,7 +13,7 @@ }, "params": { "requests_per_second": { - "type": "float", + "type": "number", "required": true, "description": "The throttle to set on this request in floating sub-requests per second. -1 means set no throttle." } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json index 885b746d095..699ddcc9e00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json @@ -13,7 +13,7 @@ }, "params": { "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "scroll_id": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 21fda8dc805..2cf359ede16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -72,10 +72,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" @@ -89,7 +85,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { @@ -140,7 +136,7 @@ "description" : "How many suggestions to return in response" }, "suggest_text": { - "type" : "text", + "type" : "string", "description" : "The source text for which the suggestions should be returned" }, "timeout": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index ff1d35bb417..b9339b55332 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -39,7 +39,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index d87e4c5e7f5..7e7fffcee07 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -73,7 +73,7 @@ "description": "Explicit timestamp for the document" }, "ttl": { - "type": "duration", + "type": "time", "description": "Expiration time for the document" }, "version": { diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index b7f608b8b4f..47497d8806a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -36,22 +36,6 @@ "type" : "string", "description" : "The field to use as default where no field prefix is given in the query string" }, - "explain": { - "type" : "boolean", - "description" : "Specify whether to return detailed information about score computation as part of a hit" - }, - "stored_fields": { - "type" : "list", - "description" : "A comma-separated list of stored fields to return as part of a hit" - }, - "docvalue_fields": { - "type" : "list", - "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" - }, - "fielddata_fields": { - "type" : "list", - "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" - }, "from": { "type" : "number", "description" : "Starting offset (default: 0)" @@ -69,7 +53,7 @@ "type" : "enum", "options": ["abort", "proceed"], "default": "abort", - "description" : "What to do when the reindex hits version conflicts?" + "description" : "What to do when the update by query hits version conflicts?" }, "expand_wildcards": { "type" : "enum", @@ -81,10 +65,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "pipeline": { "type" : "string", "description" : "Ingest pipeline to set on index requests made by this action. (default: none)" @@ -102,7 +82,7 @@ "description" : "A comma-separated list of specific routing values" }, "scroll": { - "type" : "duration", + "type" : "time", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" }, "search_type": { @@ -142,32 +122,6 @@ "type" : "list", "description" : "Specific 'tag' of the request for logging and statistical purposes" }, - "suggest_field": { - "type" : "string", - "description" : "Specify which field to use for suggestions" - }, - "suggest_mode": { - "type" : "enum", - "options" : ["missing", "popular", "always"], - "default" : "missing", - "description" : "Specify suggest mode" - }, - "suggest_size": { - "type" : "number", - "description" : "How many suggestions to return in response" - }, - "suggest_text": { - "type" : "text", - "description" : "The source text for which the suggestions should be returned" - }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - }, - "track_scores": { - "type" : "boolean", - "description": "Whether to calculate and return scores even if they are not used for sorting" - }, "version": { "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" @@ -194,19 +148,24 @@ "description" : "Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. 
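To make the reworded `conflicts` description above concrete: with `conflicts=proceed`, an update-by-query counts version conflicts in the response instead of aborting on the first one. A hypothetical invocation (the index, field, and query are invented for illustration):

--------------------------------------------------
# touch every matching document, skipping docs modified concurrently
curl -XPOST 'localhost:9200/tweets/_update_by_query?conflicts=proceed' -d '{
  "script": { "inline": "ctx._source.likes++" },
  "query": { "term": { "user": "kimchy" } }
}'
--------------------------------------------------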
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, "scroll_size": { - "type": "integer", + "type": "number", "defaut_value": 100, "description": "Size on the scroll request powering the update_by_query" }, "wait_for_completion": { "type" : "boolean", "default": false, - "description" : "Should the request should block until the reindex is complete." + "description" : "Should the request block until the update by query operation is complete." }, "requests_per_second": { - "type": "float", + "type": "number", "default": 0, - "description": "The throttle to set on this request in sub-requests per second. -1 means set no throttle as does \"unlimited\" which is the only non-float this accepts." + "description": "The throttle to set on this request in sub-requests per second. -1 means no throttle." + }, + "slices": { + "type": "integer", + "default": 1, + "description": "The number of slices this task should be divided into. Defaults to 1 meaning the task isn't sliced into subtasks." } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index 612831575e1..db33513962f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -10,7 +10,7 @@ Elasticsearch as follows: [source,sh] --------------------- -bin/elasticsearch -E script.inline true -E node.attr.testattr test -E path.repo /tmp -E repositories.url.allowed_urls 'http://snapshot.*' +bin/elasticsearch -Enode.attr.testattr=test -Epath.repo=/tmp -Erepositories.url.allowed_urls='http://snapshot.*' --------------------- ======================================= diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml index 770c5c8b441..fc7eb456892 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml @@ -216,3 +216,50 @@ - \s+ $/ +--- +"Alias sorting": + + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 + + - do: + indices.create: + index: test_index + body: + aliases: + test_alias: {} + my_alias: {} + + - do: + indices.create: + index: other_index + body: + aliases: + other_alias: {} + + - do: + cat.aliases: + h: [alias, index] + s: [index, alias] + + - match: + $body: | + /^ + other_alias \s+ other_index\n + my_alias \s+ test_index\n + test_alias \s+ test_index\n + $/ + + - do: + cat.aliases: + h: [alias, index] + s: [index, "a:desc"] + + - match: + $body: | + /^ + other_alias \s+ other_index\n + test_alias \s+ test_index\n + my_alias \s+ test_index\n + $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml index 24619e53353..3e900132273 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml @@ -157,3 +157,61 @@ - match: $body: | /^(ba(r|z) \n?){2}$/ + +--- +"Test cat indices sort": + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 + + - do: + indices.create: + index: foo + body: + settings: + number_of_shards:
"1" + number_of_replicas: "0" + + - do: + indices.create: + index: bar + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + + - do: + indices.create: + index: baz + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + + - do: + indices.close: + index: bar + + - do: + cat.indices: + h: [status, index] + s: [status, index] + + - match: + $body: | + /^ close \s+ bar\n + open \s+ baz\n + open \s+ foo\n + $/ + + - do: + cat.indices: + h: [status, index] + s: [status, "index:desc"] + + - match: + $body: | + /^ close \s+ bar\n + open \s+ foo\n + open \s+ baz\n + $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml index 391a7c1e6d1..9f4de56c863 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml @@ -57,3 +57,28 @@ - match: $body: | /^ http \n ((\d{1,3}\.){3}\d{1,3}:\d{1,5}\n)+ $/ + +--- +"Test cat nodes output with full_id set": + - skip: + version: " - 5.0.0" + reason: The full_id setting was rejected in 5.0.0 see #21266 + + + - do: + cat.nodes: + h: id + # check for a 4 char non-whitespace character string + - match: + $body: | + /^(\S{4}\n)+$/ + + - do: + cat.nodes: + h: id + full_id: true + # check for a 5+ char non-whitespace character string + - match: + $body: | + /^(\S{5,}\n)+$/ + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml index c7eb9c1f930..6d83274726e 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml @@ -43,3 +43,44 @@ /^ test_cat_repo_1\s+ fs\s*\n test_cat_repo_2\s+ fs\s*\n $/ + +--- +"Test cat repositories sort": + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 + - do: + snapshot.create_repository: + repository: test_cat_repo_1 + body: + type: fs + settings: + location: "test_cat_repo_1_loc" + + - do: + snapshot.create_repository: + repository: test_cat_repo_2 + body: + type: fs + settings: + location: "test_cat_repo_2_loc" + + - do: + cat.repositories: + s: [type, id] + + - match: + $body: | + /^ test_cat_repo_1 \s+ fs \n + test_cat_repo_2 \s+ fs \n + $/ + + - do: + cat.repositories: + s: [type, "id:desc"] + + - match: + $body: | + /^ test_cat_repo_2 \s+ fs \n + test_cat_repo_1 \s+ fs \n + $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index eb94c4800bb..8c7cd83b0e0 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -227,3 +227,43 @@ - match: $body: | /^(ba(r|z) \n?){2}$/ + +--- +"Test cat shards sort": + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 + + - do: + indices.create: + index: foo + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + + - do: + indices.create: + index: bar + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + + - do: + index: + index: bar + type: type + body: { test: bar } + refresh: true + + - do: + cat.shards: + h: [index, docs] + s: [docs] + + - match: # don't use the store here it's 
cached and might be stale + $body: | + /^ foo \s+ 0\n + bar \s+ 1\n + $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.snapshots/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.snapshots/10_basic.yaml index c1a1fc97011..6e03ceb98c7 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.snapshots/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.snapshots/10_basic.yaml @@ -2,7 +2,6 @@ "Help": - do: cat.snapshots: - repository: test_cat_snapshots_1 help: true - match: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml index 6758bec39da..eb651f6b157 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml @@ -1,6 +1,8 @@ --- "Help": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: cat.templates: help: true @@ -15,7 +17,9 @@ --- "No templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: cat.templates: {} @@ -26,7 +30,9 @@ --- "Normal templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -72,7 +78,9 @@ --- "Filtered templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -111,7 +119,9 @@ --- "Column headers": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -145,7 +155,9 @@ --- "Select columns": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -174,3 +186,52 @@ \n $/ +--- +"Sort templates": + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 + - do: + indices.put_template: + name: test + body: + order: 0 + template: t* + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + indices.put_template: + name: test_1 + body: + order: 0 + version: 1 + template: te* + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + cat.templates: + h: [name, template, version] + s: [version] + + - match: + $body: | + /^ + test \s+ t\* \s+\n + test_1 \s+ te\* \s+ 1\n + $/ + + - do: + cat.templates: + h: [name, template, version] + s: ["version:desc"] + + - match: + $body: | + /^ + test_1 \s+ te\* \s+ 1\n + test \s+ t\* \s+\n + $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml index a7d4e6c9901..9cd97034141 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml @@ -52,7 +52,7 @@ - match: $body: | /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: @@ -63,8 +63,8 @@ - match: $body: | /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected 
\s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n - \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n + \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: @@ -77,4 +77,3 @@ ^ (\S+ \s+ bulk \s+ \d+ \s+ \d+ \s+ \d+ \n \S+ \s+ index \s+ \d+ \s+ \d+ \s+ \d+ \n \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml index 933033761e9..70f402691a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml @@ -58,15 +58,6 @@ count: index: test q: field:BA* - lowercase_expanded_terms: false - - - match: {count : 0} - - - do: - count: - index: test - q: field:BA* - analyze_wildcard: true - match: {count : 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml index 1126a3d085c..ab993281938 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/15_without_id.yaml @@ -1,7 +1,7 @@ --- "Create without ID": - do: - catch: /Validation|Invalid/ + catch: param create: index: test_1 type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml index 1923377ba83..4ea921a3fa0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml @@ -4,7 +4,9 @@ setup: index: test_1 body: aliases: - alias_1: {} + alias_1: { + "filter" : { "term" : { "foo" : "bar"} } + } - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml index 30fe6cc55b6..78ef8c4bc89 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml @@ -68,17 +68,6 @@ type: test id: 1 q: field:BA* - lowercase_expanded_terms: false - - - is_false: matched - - - do: - explain: - index: test - type: test - id: 1 - q: field:BA* - analyze_wildcard: true - is_true: matched diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml index e255ce510ed..c6631b83b18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml @@ -86,3 +86,4 @@ id: 1 version: 1 version_type: external_gte + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml index 35d4a2b5222..268cd781289 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml @@ -8,7 +8,8 @@ setup: "Basic test": - do: indices.analyze: - text: Foo Bar + body: + text: Foo Bar - length: { tokens: 2 } - match: { tokens.0.token: foo } - match: { tokens.1.token: bar } @@ -17,9 +18,10 @@ setup: "Tokenizer and filter": - do: indices.analyze: - filter: lowercase - text: Foo Bar - tokenizer: keyword + body: + filter: [lowercase] + text: Foo Bar + tokenizer: keyword - length: { tokens: 1 } - match: { tokens.0.token: foo bar } @@ -38,9 +40,10 @@ setup: - do: indices.analyze: - field: text index: test - text: Foo Bar! + body: + field: text + text: Foo Bar! - length: { tokens: 2 } - match: { tokens.0.token: Foo } - match: { tokens.1.token: Bar! } @@ -52,14 +55,6 @@ setup: - length: {tokens: 1 } - match: { tokens.0.token: foo bar } --- -"Body params override query string": - - do: - indices.analyze: - text: Foo Bar - body: { "text": "Bar Foo", "filter": ["lowercase"], "tokenizer": keyword } - - length: {tokens: 1 } - - match: { tokens.0.token: bar foo } ---- "Array text": - do: indices.analyze: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yaml index 98fd6b3a984..5527c023b13 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yaml @@ -27,3 +27,35 @@ name: test_alias - match: {test_index.aliases.test_alias: {}} + +--- +"Can't create alias with invalid characters": + - skip: + version: " - 5.0.99" + reason: alias name validation was introduced in 5.1.0 + + - do: + indices.create: + index: test_index + + - do: + catch: request + indices.put_alias: + index: test_index + name: test_* + +--- +"Can't create alias with the same name as an index": + + - do: + indices.create: + index: test_index + - do: + indices.create: + index: foo + + - do: + catch: request + indices.put_alias: + index: test_index + name: foo diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml index 6c93dabeec7..bc2dace0e18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/11_reset.yaml @@ -22,8 +22,7 @@ Test reset index settings: - do: indices.get_settings: flat_settings: false - - is_false: - test-index.settings.index\.refresh_interval + - is_false: test-index.settings.index\.refresh_interval - do: indices.get_settings: include_defaults: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml index 8c962407b30..a38f4329e33 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml @@ -71,3 +71,87 @@ - match: { hits.total: 1 } - match: { hits.hits.0._index: "logs-000002"} +--- +"Rollover no condition matched": + - skip: + version: " - 5.0.0" + 
reason: bug fixed in 5.0.1 + + # create index with alias + - do: + indices.create: + index: logs-1 + wait_for_active_shards: 1 + body: + aliases: + logs_index: {} + logs_search: {} + + # run again and verify results without rolling over + - do: + indices.rollover: + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_docs: 1 + + - match: { old_index: logs-1 } + - match: { new_index: logs-000002 } + - match: { rolled_over: false } + - match: { dry_run: false } + - match: { conditions: { "[max_docs: 1]": false } } + +--- +"Rollover with dry-run but target index exists": + + - skip: + version: " - 5.0.0" + reason: bug fixed in 5.0.1 - dry run was returning just fine even if the index exists + + # create index with alias + - do: + indices.create: + index: logs-1 + wait_for_active_shards: 1 + body: + aliases: + logs_index: {} + logs_search: {} + + - do: + indices.create: + index: logs-000002 + + - do: + catch: /index_already_exists_exception/ + indices.rollover: + dry_run: true + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_docs: 1 + + # also do it without dry_run + - do: + catch: /index_already_exists_exception/ + indices.rollover: + dry_run: false + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_docs: 1 + + - do: + catch: /invalid_index_name_exception/ + indices.rollover: + new_index: invalid|index|name + dry_run: true + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_docs: 1 + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml index 9569728ce7d..62a75b0ff04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml @@ -1,15 +1,26 @@ --- "Shrink index via API": - # creates an index with one document. - # relocates all it's shards to one node - # shrinks it into a new index with a single shard + # creates an index with one document solely allocated on the master node + # and shrinks it into a new index with a single shard + # we don't do the relocation to a single node after the index is created + # here since in a mixed version cluster we can't identify + # which node is the one with the highest version and that is the only one that can safely + # be used to shrink the index. 
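Outside the test harness, the preparation the comment above describes applies to any shrink: co-locate a complete copy of the index on one node, block writes, then shrink. A condensed sketch with hypothetical node and index names:

--------------------------------------------------
# 1. force all shards onto one node and make the index read-only
curl -XPUT 'localhost:9200/source/_settings' -d '{
  "index.routing.allocation.require._name": "shrink-node-1",
  "index.blocks.write": true
}'
# 2. once relocation finishes, shrink into a single-shard index
curl -XPOST 'localhost:9200/source/_shrink/target' -d '{
  "settings": { "index.number_of_shards": 1 }
}'
--------------------------------------------------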
+ - do: + cluster.state: {} + # Get master node id + + - set: { master_node: master } + - do: indices.create: index: source wait_for_active_shards: 1 body: settings: - number_of_replicas: "0" + # ensure everything is allocated on a single node + index.routing.allocation.include._id: $master + number_of_replicas: 0 - do: index: index: source @@ -28,18 +39,11 @@ - match: { _id: "1" } - match: { _source: { foo: "hello world" } } - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - # relocate everything to the master node and make it read-only + # make it read-only - do: indices.put_settings: index: source body: - index.routing.allocation.include._id: $master index.blocks.write: true index.number_of_replicas: 0 @@ -47,8 +51,6 @@ cluster.health: wait_for_status: green index: source - wait_for_no_relocating_shards: true - wait_for_events: "languid" # now we do the actual shrink - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml index 14e258a6bb4..66da068895f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/30_remove_index_and_replace_with_alias.yaml @@ -31,4 +31,5 @@ - do: indices.get_mapping: index: test - - is_true: test_2 # the name of the index that the alias points to, would be `test` if the index were still there + # the name of the index that the alias points to, would be `test` if the index were still there + - is_true: test_2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml index e696a5600bc..9d2245b4b40 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml @@ -9,14 +9,80 @@ index: number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - do: indices.upgrade: index: test_index - match: {upgraded_indices.test_index.oldest_lucene_segment_version: '/(\d\.)+\d/'} - is_true: upgraded_indices.test_index.upgrade_version + +--- +"Upgrade indices ignore unavailable": + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + indices.upgrade: + index: ["does_not_exist", "test_index"] + ignore_unavailable: true + + - match: {_shards.total: 1} + - is_true: upgraded_indices.test_index.upgrade_version + - is_false: upgraded_indices.does_not_exist + +--- +"Upgrade indices allow no indices": + + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + indices.upgrade: + index: test_index + ignore_unavailable: true + allow_no_indices: true + + - match: {_shards.total: 0} + +--- +"Upgrade indices disallow no indices": + + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + catch: missing + indices.upgrade: + index: test_index + ignore_unavailable: true + allow_no_indices: false + +--- +"Upgrade indices disallow unavailable": + + - skip: + 
version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + indices.create: + index: test_index + + - do: + catch: missing + indices.upgrade: + index: ["test_index", "does_not_exist"] + ignore_unavailable: false + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml index a1f9aa87636..637ebd4253e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml @@ -5,12 +5,24 @@ setup: body: settings: number_of_replicas: 0 + aliases: + alias_1: { + "filter" : { "match_all" : {} } + } --- "Validate query api": - do: indices.validate_query: q: query string + index: testing + + - is_true: valid + + - do: + indices.validate_query: + q: query string + index: alias_1 - is_true: valid diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml index c6dd323aa6c..3f96009c12a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml @@ -49,15 +49,6 @@ indices.validate_query: index: test q: field:BA* - lowercase_expanded_terms: false - - - is_true: valid - - - do: - indices.validate_query: - index: test - q: field:BA* - analyze_wildcard: true - is_true: valid diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yaml new file mode 100644 index 00000000000..3a0fec04738 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yaml @@ -0,0 +1,42 @@ +--- +"Multi Get with alias that resolves to multiple indices": + + - do: + bulk: + refresh: true + body: | + {"index": {"_index": "test_1", "_type": "test", "_id": 1}} + { "foo": "bar" } + {"index": {"_index": "test_2", "_type": "test", "_id": 2}} + { "foo": "bar" } + {"index": {"_index": "test_3", "_type": "test", "_id": 3}} + { "foo": "bar" } + + - do: + indices.put_alias: + index: test_2 + name: test_two_and_three + + - do: + indices.put_alias: + index: test_3 + name: test_two_and_three + + - do: + mget: + body: + docs: + - { _index: test_1, _type: test, _id: 1} + - { _index: test_two_and_three, _type: test, _id: 2} + + - is_true: docs.0.found + - match: { docs.0._index: test_1 } + - match: { docs.0._type: test } + - match: { docs.0._id: "1" } + + - is_false: docs.1.found + - match: { docs.1._index: test_two_and_three } + - match: { docs.1._type: test } + - match: { docs.1._id: "2" } + - match: { docs.1.error.root_cause.0.type: "illegal_argument_exception" } + - match: { docs.1.error.root_cause.0.reason: "/Alias.\\[test_two_and_three\\].has.more.than.one.indices.associated.with.it.\\[\\[test_[23]{1},.test_[23]{1}\\]\\],.can't.execute.a.single.index.op/" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml index 977d002748a..1cd9fef0258 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml @@ -6,3 +6,17 @@ - is_true: cluster_name - is_true: nodes + +--- +"Nodes stats level": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: + metric: [ indices ] + level: "indices" + + - is_true: nodes.$master.indices.indices diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml index 5443059135a..1695bdb2352 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yaml @@ -21,10 +21,9 @@ scroll: 1m sort: foo body: - slice: { - id: 0, + slice: + id: 0 max: 3 - } query: match_all: {} @@ -41,10 +40,9 @@ size: 1 scroll: 1m body: - slice: { - id: 0, + slice: + id: 0 max: 1025 - } query: match_all: {} @@ -60,10 +58,9 @@ size: 1 scroll: 1m body: - slice: { - id: 0, + slice: + id: 0 max: 1025 - } query: match_all: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index c35e79e6cfe..029b44544fd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -47,30 +47,30 @@ setup: type: test id: 3 body: { "str": "bcd" } - + - do: indices.refresh: {} - + - do: search: body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str" } } } } - match: { hits.total: 3 } - + - length: { aggregations.str_terms.buckets: 2 } - + - match: { aggregations.str_terms.buckets.0.key: "abc" } - + - is_false: aggregations.str_terms.buckets.0.key_as_string - + - match: { aggregations.str_terms.buckets.0.doc_count: 2 } - + - match: { aggregations.str_terms.buckets.1.key: "bcd" } - + - is_false: aggregations.str_terms.buckets.1.key_as_string - + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } - + --- "IP test": - do: @@ -112,9 +112,9 @@ setup: - match: { aggregations.ip_terms.buckets.0.doc_count: 2 } - match: { aggregations.ip_terms.buckets.1.key: "127.0.0.1" } - + - is_false: aggregations.ip_terms.buckets.1.key_as_string - + - match: { aggregations.ip_terms.buckets.1.doc_count: 1 } - do: @@ -142,7 +142,7 @@ setup: search: body: { "size" : 0, "aggs" : { "ip_terms" : { "terms" : { "field" : "ip", "exclude" : "127.*" } } } } - + --- "Boolean test": @@ -327,7 +327,7 @@ setup: - match: { aggregations.date_terms.buckets.1.key_as_string: "2014-09-01T00:00:00.000Z" } - match: { aggregations.date_terms.buckets.1.doc_count: 1 } - + - do: search: body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "include" : [ "2016-05-03" ] } } } } @@ -335,11 +335,11 @@ setup: - match: { hits.total: 3 } - length: { aggregations.date_terms.buckets: 1 } - + - match: { aggregations.date_terms.buckets.0.key_as_string: "2016-05-03T00:00:00.000Z" } - - - match: { aggregations.date_terms.buckets.0.doc_count: 2 } - + + - match: { aggregations.date_terms.buckets.0.doc_count: 2 } + - do: search: body: { "size" : 0, "aggs" : { "date_terms" : { "terms" : { "field" : "date", "exclude" : [ "2016-05-03" ] } } } } @@ -347,7 +347,7 @@ setup: - match: { hits.total: 3 } - length: { aggregations.date_terms.buckets: 1 } - + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } - - match: { 
aggregations.date_terms.buckets.0.doc_count: 1 } + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yaml new file mode 100644 index 00000000000..e2b6ff0e9f5 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yaml @@ -0,0 +1,226 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + test: + properties: + ip: + type: ip + double: + type: double + date: + type: date + + - do: + cluster.health: + wait_for_status: green + +--- +"Double range": + - do: + index: + index: test + type: test + id: 1 + body: { "double" : 42 } + + - do: + index: + index: test + type: test + id: 2 + body: { "double" : 100 } + + - do: + index: + index: test + type: test + id: 3 + body: { "double" : 50 } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "double_range" : { "range" : { "field" : "double", "ranges": [ { "to": 50 }, { "from": 50, "to": 150 }, { "from": 150 } ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.double_range.buckets: 3 } + + - match: { aggregations.double_range.buckets.0.key: "*-50.0" } + + - is_false: aggregations.double_range.buckets.0.from + + - match: { aggregations.double_range.buckets.0.to: 50.0 } + + - match: { aggregations.double_range.buckets.0.doc_count: 1 } + + - match: { aggregations.double_range.buckets.1.key: "50.0-150.0" } + + - match: { aggregations.double_range.buckets.1.from: 50.0 } + + - match: { aggregations.double_range.buckets.1.to: 150.0 } + + - match: { aggregations.double_range.buckets.1.doc_count: 2 } + + - match: { aggregations.double_range.buckets.2.key: "150.0-*" } + + - match: { aggregations.double_range.buckets.2.from: 150.0 } + + - is_false: aggregations.double_range.buckets.2.to + + - match: { aggregations.double_range.buckets.2.doc_count: 0 } + + - do: + search: + body: { "size" : 0, "aggs" : { "double_range" : { "range" : { "field" : "double", "ranges": [ { "from": null, "to": 50 }, { "from": 50, "to": 150 }, { "from": 150, "to": null } ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.double_range.buckets: 3 } + + - match: { aggregations.double_range.buckets.0.key: "*-50.0" } + + - is_false: aggregations.double_range.buckets.0.from + + - match: { aggregations.double_range.buckets.0.to: 50.0 } + + - match: { aggregations.double_range.buckets.0.doc_count: 1 } + + - match: { aggregations.double_range.buckets.1.key: "50.0-150.0" } + + - match: { aggregations.double_range.buckets.1.from: 50.0 } + + - match: { aggregations.double_range.buckets.1.to: 150.0 } + + - match: { aggregations.double_range.buckets.1.doc_count: 2 } + + - match: { aggregations.double_range.buckets.2.key: "150.0-*" } + + - match: { aggregations.double_range.buckets.2.from: 150.0 } + + - is_false: aggregations.double_range.buckets.2.to + + - match: { aggregations.double_range.buckets.2.doc_count: 0 } + +--- +"IP range": + - do: + index: + index: test + type: test + id: 1 + body: { "ip" : "::1" } + + - do: + index: + index: test + type: test + id: 2 + body: { "ip" : "192.168.0.1" } + + - do: + index: + index: test + type: test + id: 3 + body: { "ip" : "192.168.0.7" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "to": "192.168.0.0" }, { 
"from": "192.168.0.0", "to": "192.169.0.0" }, { "from": "192.169.0.0" } ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.ip_range.buckets: 3 } + +# ip_range does not automatically add keys to buckets, see #21045 +# - match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" } + + - is_false: aggregations.ip_range.buckets.0.from + + - match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" } + + - match: { aggregations.ip_range.buckets.0.doc_count: 1 } + +# - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" } + + - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } + + - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } + + - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + +# - match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" } + + - match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" } + + - is_false: aggregations.ip_range.buckets.2.to + + - match: { aggregations.ip_range.buckets.2.doc_count: 0 } + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "from": null, "to": "192.168.0.0" }, { "from": "192.168.0.0", "to": "192.169.0.0" }, { "from": "192.169.0.0", "to": null } ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.ip_range.buckets: 3 } + +# - match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" } + + - is_false: aggregations.ip_range.buckets.0.from + + - match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" } + + - match: { aggregations.ip_range.buckets.0.doc_count: 1 } + +# - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" } + + - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } + + - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } + + - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + +# - match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" } + + - match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" } + + - is_false: aggregations.ip_range.buckets.2.to + + - match: { aggregations.ip_range.buckets.2.doc_count: 0 } + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "mask": "::/24" }, { "mask": "192.168.0.0/16" } ] } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.ip_range.buckets: 2 } + + - match: { aggregations.ip_range.buckets.0.key: "::/24" } + + - match: { aggregations.ip_range.buckets.0.from: "::" } + + - match: { aggregations.ip_range.buckets.0.to: "0:ff:ffff:ffff:ffff:ffff:ffff:ffff" } + + - match: { aggregations.ip_range.buckets.0.doc_count: 3 } + + - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0/16" } + + - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } + + - match: { aggregations.ip_range.buckets.1.to: "192.168.255.255" } + + - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yaml new file mode 100644 index 00000000000..2152e75f7e6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yaml @@ -0,0 +1,80 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + post: + properties: + mentions: + type: keyword + user: + properties: + notifications: + type: keyword + + - do: + index: + index: 
test + type: test + id: foo|bar|baz0 + body: { "notifications" : ["abc"] } + + - do: + index: + index: test + type: test + id: foo|bar|baz1 + body: { "mentions" : ["abc"] } + + - do: + indices.refresh: {} + +--- +"Filter aggs with terms lookup ensure not cached": + - skip: + version: " - 5.0.0" + reason: This uses filter aggs that need rewriting; this was fixed in 5.0.1 + + - do: + search: + size: 0 + request_cache: true + body: {"aggs": { "itemsNotify": { "filter": { "terms": { "mentions": { "index": "test", "type": "test", "id": "foo|bar|baz0", "path": "notifications"}}}, "aggs": { "mentions" : {"terms" : { "field" : "mentions" }}}}}} + + # validate result + - match: { hits.total: 2 } + - match: { aggregations.itemsNotify.doc_count: 1 } + - length: { aggregations.itemsNotify.mentions.buckets: 1 } + - match: { aggregations.itemsNotify.mentions.buckets.0.key: "abc" } + # we are using a lookup - this should not cache + - do: + indices.stats: { index: test, metric: request_cache} + - match: { _shards.total: 1 } + - match: { _all.total.request_cache.hit_count: 0 } + - match: { _all.total.request_cache.miss_count: 0 } + - is_true: indices.test + +--- +"Filter aggs no lookup and ensure it's cached": + # now run without a lookup and ensure the request is cacheable (the stats below expect a single cache miss from this first execution) + - do: + search: + size: 0 + request_cache: true + body: {"aggs": { "itemsNotify": { "filter": { "terms": { "mentions": ["abc"]}}, "aggs": { "mentions" : {"terms" : { "field" : "mentions" }}}}}} + + - match: { hits.total: 2 } + - match: { aggregations.itemsNotify.doc_count: 1 } + - length: { aggregations.itemsNotify.mentions.buckets: 1 } + - match: { aggregations.itemsNotify.mentions.buckets.0.key: "abc" } + - do: + indices.stats: { index: test, metric: request_cache} + - match: { _shards.total: 1 } + - match: { _all.total.request_cache.hit_count: 0 } + - match: { _all.total.request_cache.miss_count: 1 } + - is_true: indices.test + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml index 98e61dd9fa9..100b44dcb04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yaml @@ -5,19 +5,14 @@ setup: index: test body: mappings: - type_1: { - properties: { - nested_field : { + type_1: + properties: + nested_field: type: nested - } - } - } type_2: {} - type_3: { - _parent: { + type_3: + _parent: type: type_2 - } - } --- "Nested inner hits": @@ -26,13 +21,8 @@ setup: index: test type: type_1 id: 1 - body: { - "nested_field" : [ - { - "foo": "bar" - } - ] - } + body: + "nested_field" : [ { "foo": "bar" } ] - do: indices.refresh: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml index 6fb93bb1044..8533cfd2668 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml @@ -58,15 +58,6 @@ search: index: test q: field:BA* - lowercase_expanded_terms: false - - - match: {hits.total: 0} - - - do: - search: - index: test - q: field:BA* - analyze_wildcard: true - match: {hits.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml index da7af85cf9f..d47b52ce02c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yaml @@ -15,28 +15,33 @@ setup: "suggest_context": "type" : "completion" "contexts": - - "name" : "color" + - + "name" : "color" "type" : "category" "suggest_context_with_path": "type" : "completion" "contexts": - - "name" : "color" + - + "name" : "color" "type" : "category" "path" : "color" "suggest_geo": "type" : "completion" "contexts": - - "name" : "location" + - + "name" : "location" "type" : "geo" "precision" : "5km" "suggest_multi_contexts": "type" : "completion" "contexts": - - "name" : "location" + - + "name" : "location" "type" : "geo" "precision" : "5km" "path" : "location" - - "name" : "color" + - + "name" : "color" "type" : "category" "path" : "color" diff --git a/settings.gradle b/settings.gradle index 81513fd372a..4c662ac448f 100644 --- a/settings.gradle +++ b/settings.gradle @@ -36,6 +36,7 @@ List projects = [ 'plugins:analysis-phonetic', 'plugins:analysis-smartcn', 'plugins:analysis-stempel', + 'plugins:analysis-ukrainian', 'plugins:discovery-azure-classic', 'plugins:discovery-ec2', 'plugins:discovery-file', diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 89fafc74c86..47051d9072d 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -36,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -54,8 +53,6 @@ import static java.util.Collections.emptyMap; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; -/** - */ public abstract class ESAllocationTestCase extends ESTestCase { private static final ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -105,19 +102,19 @@ public abstract class ESAllocationTestCase extends ESTestCase { } protected static DiscoveryNode newNode(String nodeName, String nodeId, Map attributes) { - return new DiscoveryNode(nodeName, nodeId, LocalTransportAddress.buildUnique(), attributes, MASTER_DATA_ROLES, Version.CURRENT); + return new DiscoveryNode(nodeName, nodeId, buildNewFakeTransportAddress(), attributes, MASTER_DATA_ROLES, Version.CURRENT); } protected static DiscoveryNode newNode(String nodeId, Map attributes) { - return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), attributes, MASTER_DATA_ROLES, Version.CURRENT); + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), attributes, MASTER_DATA_ROLES, Version.CURRENT); } protected static DiscoveryNode newNode(String nodeId, Set roles) { - return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), emptyMap(), roles, 
Version.CURRENT); + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); } protected static DiscoveryNode newNode(String nodeId, Version version) { - return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, version); + return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, version); } protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) { @@ -149,7 +146,7 @@ public abstract class ESAllocationTestCase extends ESTestCase { ClusterState lastClusterState; do { lastClusterState = clusterState; - logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); clusterState = service.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); } while (lastClusterState.equals(clusterState) == false); return clusterState; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 45edbd8bcb2..576b290ed40 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -32,10 +32,10 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; @@ -67,7 +67,7 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes()); paths[0] = path; FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), null, paths); - return new NodeStats(new DiscoveryNode(nodeName, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + return new NodeStats(new DiscoveryNode(nodeName, ESTestCase.buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), System.currentTimeMillis(), null, null, null, null, null, fsInfo, diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 22a1e2660b6..8d1f42d5b57 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -24,7 +24,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -39,7 +38,7 @@ import java.util.Set; import java.util.WeakHashMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; public class MockBigArrays extends BigArrays { @@ -250,13 +249,13 @@ public class MockBigArrays extends BigArrays { final BigArray in; boolean clearOnResize; - AtomicBoolean released; + AtomicReference originalRelease; AbstractArrayWrapper(BigArray in, boolean clearOnResize) { ACQUIRED_ARRAYS.put(this, TRACK_ALLOCATIONS ? new RuntimeException() : Boolean.TRUE); this.in = in; this.clearOnResize = clearOnResize; - released = new AtomicBoolean(false); + originalRelease = new AtomicReference<>(); } protected abstract BigArray getDelegate(); @@ -272,8 +271,8 @@ public class MockBigArrays extends BigArrays { } public void close() { - if (!released.compareAndSet(false, true)) { - throw new IllegalStateException("Double release"); + if (originalRelease.compareAndSet(null, new AssertionError()) == false) { + throw new IllegalStateException("Double release. Original release attached as cause", originalRelease.get()); } ACQUIRED_ARRAYS.remove(this); randomizeContent(0, size()); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 46d07b3cd87..fbff16e4999 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.NodeEnvironment; @@ -374,7 +373,7 @@ public abstract class IndexShardTestCase extends ESTestCase { } private DiscoveryNode getFakeDiscoNode(String id) { - return new DiscoveryNode(id, new LocalTransportAddress("_fake_" + id), Version.CURRENT); + return new DiscoveryNode(id, buildNewFakeTransportAddress(), Version.CURRENT); } /** recovers a replica from the given primary **/ diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index 3be4cd3edf4..efdf10d5a5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; @@ -41,7 +41,7 @@ public final class RandomDocumentPicks { * path to refer to a field name using the dot notation. 
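* (Illustrative only: with three levels the generated name could look like {@code foo.bar.baz}, one random segment per level.)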
*/ public static String randomFieldName(Random random) { - int numLevels = RandomInts.randomIntBetween(random, 1, 5); + int numLevels = RandomNumbers.randomIntBetween(random, 1, 5); String fieldName = ""; for (int i = 0; i < numLevels; i++) { if (i > 0) { @@ -169,7 +169,7 @@ public final class RandomDocumentPicks { } private static Object randomFieldValue(Random random, int currentDepth) { - switch(RandomInts.randomIntBetween(random, 0, 9)) { + switch(RandomNumbers.randomIntBetween(random, 0, 9)) { case 0: return randomString(random); case 1: @@ -180,28 +180,28 @@ public final class RandomDocumentPicks { return random.nextDouble(); case 4: List stringList = new ArrayList<>(); - int numStringItems = RandomInts.randomIntBetween(random, 1, 10); + int numStringItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numStringItems; j++) { stringList.add(randomString(random)); } return stringList; case 5: List intList = new ArrayList<>(); - int numIntItems = RandomInts.randomIntBetween(random, 1, 10); + int numIntItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numIntItems; j++) { intList.add(random.nextInt()); } return intList; case 6: List booleanList = new ArrayList<>(); - int numBooleanItems = RandomInts.randomIntBetween(random, 1, 10); + int numBooleanItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numBooleanItems; j++) { booleanList.add(random.nextBoolean()); } return booleanList; case 7: List doubleList = new ArrayList<>(); - int numDoubleItems = RandomInts.randomIntBetween(random, 1, 10); + int numDoubleItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numDoubleItems; j++) { doubleList.add(random.nextDouble()); } @@ -211,7 +211,7 @@ public final class RandomDocumentPicks { addRandomFields(random, newNode, ++currentDepth); return newNode; case 9: - byte[] byteArray = new byte[RandomInts.randomIntBetween(random, 1, 1024)]; + byte[] byteArray = new byte[RandomNumbers.randomIntBetween(random, 1, 1024)]; random.nextBytes(byteArray); return byteArray; default: @@ -230,7 +230,7 @@ public final class RandomDocumentPicks { if (currentDepth > 5) { return; } - int numFields = RandomInts.randomIntBetween(random, 1, 10); + int numFields = RandomNumbers.randomIntBetween(random, 1, 10); for (int i = 0; i < numFields; i++) { String fieldName = randomLeafFieldName(random); Object fieldValue = randomFieldValue(random, currentDepth); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index b838cca4a23..38e8a8436b1 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -20,19 +20,22 @@ package org.elasticsearch.node; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.recovery.RecoverySettings; import 
org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -84,15 +87,37 @@ public class MockNode extends Node { @Override protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool, - TransportInterceptor interceptor) { - // we use the MockTransportService.TestPlugin class as a marker to create a newtwork + TransportInterceptor interceptor, ClusterSettings clusterSettings) { + // we use the MockTransportService.TestPlugin class as a marker to create a network // module with this MockNetworkService. NetworkService is such an integral part of the system // we don't allow plugging it in from plugins or anything. this is a test-only override and // can't be done in a production env. - if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).size() == 1) { - return new MockTransportService(settings, transport, threadPool, interceptor); + if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).isEmpty()) { + return super.newTransportService(settings, transport, threadPool, interceptor, clusterSettings); } else { - return super.newTransportService(settings, transport, threadPool, interceptor); + return new MockTransportService(settings, transport, threadPool, interceptor, clusterSettings); + } + } + + @Override + protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, + UnicastHostsProvider hostsProvider) { + if (getPluginsService().filterPlugins(MockZenPing.TestPlugin.class).isEmpty()) { + return super.newZenPing(settings, threadPool, transportService, hostsProvider); + } else { + return new MockZenPing(settings); + } + } + + @Override + protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins) { + return new MockNode(settings, classpathPlugins); + } + + @Override + protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { + if (false == getPluginsService().filterPlugins(RecoverySettingsChunkSizePlugin.class).isEmpty()) { + clusterSettings.addSettingsUpdateConsumer(RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING, recoverySettings::setChunkSize); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java index cf565499a8d..ea924734765 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java +++ b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.transport.MockTcpTransportPlugin; import java.io.IOException; import java.nio.file.Path; @@ -46,13 +47,12 @@ public class NodeTests extends ESTestCase { .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) 
.put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("discovery.type", "local") - .put("transport.type", "local") + .put("transport.type", "mock-socket-network") .put(Node.NODE_DATA_SETTING.getKey(), true); if (name != null) { settings.put(Node.NODE_NAME_SETTING.getKey(), name); } - try (Node node = new MockNode(settings.build(), Collections.emptyList())) { + try (Node node = new MockNode(settings.build(), Collections.singleton(MockTcpTransportPlugin.class))) { final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings(); if (name == null) { assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(node.getNodeEnvironment().nodeId().substring(0, 7))); diff --git a/test/framework/src/main/java/org/elasticsearch/node/RecoverySettingsChunkSizePlugin.java b/test/framework/src/main/java/org/elasticsearch/node/RecoverySettingsChunkSizePlugin.java new file mode 100644 index 00000000000..14616adc528 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/node/RecoverySettingsChunkSizePlugin.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.node; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; + +import static java.util.Collections.singletonList; + +/** + * Marker plugin that will trigger {@link MockNode} making {@link #CHUNK_SIZE_SETTING} dynamic. + */ +public class RecoverySettingsChunkSizePlugin extends Plugin { + /** + * The chunk size. Only exposed by tests. + */ + public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.chunk_size", + RecoverySettings.DEFAULT_CHUNK_SIZE, Property.Dynamic, Property.NodeScope); + + @Override + public List> getSettings() { + return singletonList(CHUNK_SIZE_SETTING); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/rest/HeadBodyIsEmptyIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/rest/HeadBodyIsEmptyIntegTestCase.java new file mode 100644 index 00000000000..0e43814b75c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/rest/HeadBodyIsEmptyIntegTestCase.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest; + +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests that HTTP HEAD requests don't respond with a body. + */ +public class HeadBodyIsEmptyIntegTestCase extends ESRestTestCase { + public void testHeadRoot() throws IOException { + headTestCase("/", emptyMap(), greaterThan(0)); + headTestCase("/", singletonMap("pretty", ""), greaterThan(0)); + headTestCase("/", singletonMap("pretty", "true"), greaterThan(0)); + } + + private void createTestDoc() throws UnsupportedEncodingException, IOException { + client().performRequest("PUT", "test/test/1", emptyMap(), new StringEntity("{\"test\": \"test\"}")); + } + + public void testDocumentExists() throws IOException { + createTestDoc(); + headTestCase("test/test/1", emptyMap(), equalTo(0)); + headTestCase("test/test/1", singletonMap("pretty", "true"), equalTo(0)); + } + + public void testIndexExists() throws IOException { + createTestDoc(); + headTestCase("test", emptyMap(), equalTo(0)); + headTestCase("test", singletonMap("pretty", "true"), equalTo(0)); + } + + public void testTypeExists() throws IOException { + createTestDoc(); + headTestCase("test/test", emptyMap(), equalTo(0)); + headTestCase("test/test", singletonMap("pretty", "true"), equalTo(0)); + } + + private void headTestCase(String url, Map params, Matcher matcher) throws IOException { + Response response = client().performRequest("HEAD", url, params); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertThat(Integer.valueOf(response.getHeader("Content-Length")), matcher); + assertNull("HEAD requests shouldn't have a response body but " + url + " did", response.getEntity()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java new file mode 100644 index 00000000000..03f36bc1db2 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -0,0 +1,338 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.rescore.RescoreBuilder; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.slice.SliceBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.test.ESTestCase.between; +import static org.elasticsearch.test.ESTestCase.generateRandomStringArray; +import static org.elasticsearch.test.ESTestCase.randomAsciiOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomByte; +import static org.elasticsearch.test.ESTestCase.randomDouble; +import static org.elasticsearch.test.ESTestCase.randomFloat; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomInt; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.elasticsearch.test.ESTestCase.randomLong; +import static org.elasticsearch.test.ESTestCase.randomPositiveTimeValue; +import static org.elasticsearch.test.ESTestCase.randomShort; +import static org.elasticsearch.test.ESTestCase.randomTimeValue; + +/** + * Builds random search requests. + */ +public class RandomSearchRequestGenerator { + private RandomSearchRequestGenerator() {} + + /** + * Build a random search request. + * + * @param randomSearchSourceBuilder builds a random {@link SearchSourceBuilder}. You can use + * {@link #randomSearchSourceBuilder(Supplier, Supplier, Supplier, Supplier)}. 
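+ * (Illustrative usage, variable names hypothetical: {@code randomSearchRequest(() -> randomSearchSourceBuilder(hb, sb, rb, eb))}, where the four suppliers are the caller's own random builders.)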
+ */ + public static SearchRequest randomSearchRequest(Supplier randomSearchSourceBuilder) throws IOException { + SearchRequest searchRequest = new SearchRequest(); + if (randomBoolean()) { + searchRequest.indices(generateRandomStringArray(10, 10, false, false)); + } + if (randomBoolean()) { + searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } + if (randomBoolean()) { + searchRequest.types(generateRandomStringArray(10, 10, false, false)); + } + if (randomBoolean()) { + searchRequest.preference(randomAsciiOfLengthBetween(3, 10)); + } + if (randomBoolean()) { + searchRequest.requestCache(randomBoolean()); + } + if (randomBoolean()) { + searchRequest.routing(randomAsciiOfLengthBetween(3, 10)); + } + if (randomBoolean()) { + searchRequest.scroll(randomPositiveTimeValue()); + } + if (randomBoolean()) { + searchRequest.searchType(randomFrom(SearchType.values())); + } + if (randomBoolean()) { + searchRequest.source(randomSearchSourceBuilder.get()); + } + return searchRequest; + } + + public static SearchSourceBuilder randomSearchSourceBuilder( + Supplier randomHighlightBuilder, + Supplier randomSuggestBuilder, + Supplier> randomRescoreBuilder, + Supplier> randomExtBuilders) { + SearchSourceBuilder builder = new SearchSourceBuilder(); + if (randomBoolean()) { + builder.from(randomIntBetween(0, 10000)); + } + if (randomBoolean()) { + builder.size(randomIntBetween(0, 10000)); + } + if (randomBoolean()) { + builder.explain(randomBoolean()); + } + if (randomBoolean()) { + builder.version(randomBoolean()); + } + if (randomBoolean()) { + builder.trackScores(randomBoolean()); + } + if (randomBoolean()) { + builder.minScore(randomFloat() * 1000); + } + if (randomBoolean()) { + builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout")); + } + if (randomBoolean()) { + builder.terminateAfter(randomIntBetween(1, 100000)); + } + + switch(randomInt(2)) { + case 0: + builder.storedFields(); + break; + case 1: + builder.storedField("_none_"); + break; + case 2: + int fieldsSize = randomInt(25); + List fields = new ArrayList<>(fieldsSize); + for (int i = 0; i < fieldsSize; i++) { + fields.add(randomAsciiOfLengthBetween(5, 50)); + } + builder.storedFields(fields); + break; + default: + throw new IllegalStateException(); + } + + if (randomBoolean()) { + int scriptFieldsSize = randomInt(25); + for (int i = 0; i < scriptFieldsSize; i++) { + if (randomBoolean()) { + builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo"), randomBoolean()); + } else { + builder.scriptField(randomAsciiOfLengthBetween(5, 50), new Script("foo")); + } + } + } + if (randomBoolean()) { + FetchSourceContext fetchSourceContext; + int branch = randomInt(5); + String[] includes = new String[randomIntBetween(0, 20)]; + for (int i = 0; i < includes.length; i++) { + includes[i] = randomAsciiOfLengthBetween(5, 20); + } + String[] excludes = new String[randomIntBetween(0, 20)]; + for (int i = 0; i < excludes.length; i++) { + excludes[i] = randomAsciiOfLengthBetween(5, 20); + } + switch (branch) { + case 0: + fetchSourceContext = new FetchSourceContext(randomBoolean()); + break; + case 1: + fetchSourceContext = new FetchSourceContext(true, includes, excludes); + break; + case 2: + fetchSourceContext = new FetchSourceContext(true, new String[]{randomAsciiOfLengthBetween(5, 20)}, + new String[]{randomAsciiOfLengthBetween(5, 20)}); + break; + case 3: + fetchSourceContext = new FetchSourceContext(true, includes, excludes); + break; + case 4: + 
fetchSourceContext = new FetchSourceContext(true, includes, null); + break; + case 5: + fetchSourceContext = new FetchSourceContext(true, new String[] {randomAsciiOfLengthBetween(5, 20)}, null); + break; + default: + throw new IllegalStateException(); + } + builder.fetchSource(fetchSourceContext); + } + if (randomBoolean()) { + int size = randomIntBetween(0, 20); + List statsGroups = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + statsGroups.add(randomAsciiOfLengthBetween(5, 20)); + } + builder.stats(statsGroups); + } + if (randomBoolean()) { + int indexBoostSize = randomIntBetween(1, 10); + for (int i = 0; i < indexBoostSize; i++) { + builder.indexBoost(randomAsciiOfLengthBetween(5, 20), randomFloat() * 10); + } + } + if (randomBoolean()) { + builder.query(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); + } + if (randomBoolean()) { + builder.postFilter(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20))); + } + if (randomBoolean()) { + int numSorts = randomIntBetween(1, 5); + for (int i = 0; i < numSorts; i++) { + int branch = randomInt(5); + switch (branch) { + case 0: + builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); + break; + case 1: + builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20), + AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values()))); + break; + case 2: + builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); + break; + case 3: + builder.sort(SortBuilders.scriptSort(new Script("foo"), + ScriptSortBuilder.ScriptSortType.NUMBER).order(randomFrom(SortOrder.values()))); + break; + case 4: + builder.sort(randomAsciiOfLengthBetween(5, 20)); + break; + case 5: + builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values())); + break; + } + } + } + + if (randomBoolean()) { + int numSearchFrom = randomIntBetween(1, 5); + try { + // We build a json version of the search_from first in order to + // ensure that every number type remains the same before/after xcontent (de)serialization. + // This is not a problem because the final type of each field value is extracted from the associated sort field. + // This little trick ensures that equals and hashcode are the same when using the xcontent serialization. 
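+ // (For example, a value written as a Short or Byte comes back from JSON parsing as an Integer; extracting the sort values from the parsed JSON up front keeps equals() and hashCode() stable across serialization round trips.)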
+ XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); + jsonBuilder.startObject(); + jsonBuilder.startArray("search_from"); + for (int i = 0; i < numSearchFrom; i++) { + int branch = randomInt(8); + switch (branch) { + case 0: + jsonBuilder.value(randomInt()); + break; + case 1: + jsonBuilder.value(randomFloat()); + break; + case 2: + jsonBuilder.value(randomLong()); + break; + case 3: + jsonBuilder.value(randomDouble()); + break; + case 4: + jsonBuilder.value(randomAsciiOfLengthBetween(5, 20)); + break; + case 5: + jsonBuilder.value(randomBoolean()); + break; + case 6: + jsonBuilder.value(randomByte()); + break; + case 7: + jsonBuilder.value(randomShort()); + break; + case 8: + jsonBuilder.value(new Text(randomAsciiOfLengthBetween(5, 20))); + break; + } + } + jsonBuilder.endArray(); + jsonBuilder.endObject(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(jsonBuilder.bytes()); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + builder.searchAfter(SearchAfterBuilder.fromXContent(parser, null).getSortValues()); + } catch (IOException e) { + throw new RuntimeException("Error building search_from", e); + } + } + if (randomBoolean()) { + builder.highlighter(randomHighlightBuilder.get()); + } + if (randomBoolean()) { + builder.suggest(randomSuggestBuilder.get()); + } + if (randomBoolean()) { + int numRescores = randomIntBetween(1, 5); + for (int i = 0; i < numRescores; i++) { + builder.addRescorer(randomRescoreBuilder.get()); + } + } + if (randomBoolean()) { + builder.aggregation(AggregationBuilders.avg(randomAsciiOfLengthBetween(5, 20))); + } + if (randomBoolean()) { + builder.ext(randomExtBuilders.get()); + } + if (randomBoolean()) { + String field = randomBoolean() ? null : randomAsciiOfLengthBetween(5, 20); + int max = between(2, 1000); + int id = randomInt(max-1); + if (field == null) { + builder.slice(new SliceBuilder(id, max)); + } else { + builder.slice(new SliceBuilder(field, id, max)); + } + } + return builder; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java index 703119a7a14..29457603c1c 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -26,9 +26,6 @@ import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public abstract class AbstractNumericTestCase extends ESIntegTestCase { protected static long minValue, maxValue, minValues, maxValues; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 6225f5fa5d0..e65e0ab4de7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -20,6 +20,7 @@ package org.elasticsearch.test; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -50,11 +51,14 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import 
org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -120,12 +124,14 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import static java.util.Collections.emptyList; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; public abstract class AbstractQueryTestCase> extends ESTestCase { @@ -153,13 +159,17 @@ public abstract class AbstractQueryTestCase> private static String[] currentTypes; private static String[] randomTypes; + /** + * used to check warning headers of the deprecation logger + */ + private ThreadContext threadContext; protected static Index getIndex() { return index; } protected static String[] getCurrentTypes() { - return currentTypes; + return currentTypes == null ? Strings.EMPTY_ARRAY : currentTypes; } protected Collection> getPlugins() { @@ -207,9 +217,23 @@ public abstract class AbstractQueryTestCase> serviceHolder = new ServiceHolder(nodeSettings, indexSettings, getPlugins(), this); } serviceHolder.clientInvocationHandler.delegate = this; + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(threadContext); } - private static void setSearchContext(String[] types, QueryShardContext context) { + /** + * Check that there are no unaccounted warning headers. 
These should be checked with {@link #checkWarningHeaders(String...)} in the + * appropriate test + */ + @After + public void teardown() throws IOException { + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertNull("unexpected warning headers", warnings); + DeprecationLogger.removeThreadContext(this.threadContext); + this.threadContext.close(); + } + + private static SearchContext getSearchContext(String[] types, QueryShardContext context) { TestSearchContext testSearchContext = new TestSearchContext(context) { @Override public MapperService mapperService() { @@ -222,13 +246,12 @@ public abstract class AbstractQueryTestCase> } }; testSearchContext.getQueryShardContext().setTypes(types); - SearchContext.setCurrent(testSearchContext); + return testSearchContext; } @After public void afterTest() { serviceHolder.clientInvocationHandler.delegate = null; - SearchContext.removeCurrent(); } public final QB createTestQueryBuilder() { @@ -482,7 +505,7 @@ public abstract class AbstractQueryTestCase> } } - private void queryWrappedInArrayTest(String queryName, String validQuery) throws IOException { + private static void queryWrappedInArrayTest(String queryName, String validQuery) throws IOException { int i = validQuery.indexOf("\"" + queryName + "\""); assertThat(i, greaterThan(0)); @@ -573,6 +596,13 @@ public abstract class AbstractQueryTestCase> return parseInnerQueryBuilder; } + /** + * Whether the queries produced by this builder are expected to be cacheable. + */ + protected boolean builderGeneratesCacheableQueries() { + return true; + } + /** * Test creates the {@link Query} from the {@link QueryBuilder} under test and delegates the * assertions being made on the result to the implementing subclass. @@ -580,15 +610,26 @@ public abstract class AbstractQueryTestCase> public void testToQuery() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) { QueryShardContext context = createShardContext(); + assert context.isCachable(); context.setAllowUnmappedFields(true); QB firstQuery = createTestQueryBuilder(); QB controlQuery = copyQuery(firstQuery); - setSearchContext(randomTypes, context); // only set search context for toQuery to be more realistic - Query firstLuceneQuery = rewriteQuery(firstQuery, context).toQuery(context); + SearchContext searchContext = getSearchContext(randomTypes, context); + /* we use a private rewrite context here since we want the most realistic way of asserting that we are cacheable or not. 
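+ * (A concrete case, exercised by the 50_filter.yaml REST test earlier in this change: a filter aggregation whose terms come from a lookup has to leave the context marked as not cacheable.)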
+ * We do it this way in SearchService where + * we first rewrite the query with a private context, then reset the context and then build the actual lucene query*/ + QueryBuilder rewritten = rewriteQuery(firstQuery, new QueryShardContext(context)); + Query firstLuceneQuery = rewritten.toQuery(context); + if (isCachable(firstQuery)) { + assertTrue("query was marked as not cacheable in the context but this test indicates it should be cacheable: " + + firstQuery.toString(), context.isCachable()); + } else { + assertFalse("query was marked as cacheable in the context but this test indicates it should not be cacheable: " + + firstQuery.toString(), context.isCachable()); + } assertNotNull("toQuery should not return null", firstLuceneQuery); - assertLuceneQuery(firstQuery, firstLuceneQuery, context); + assertLuceneQuery(firstQuery, firstLuceneQuery, searchContext); //remove after assertLuceneQuery since the assertLuceneQuery impl might access the context as well - SearchContext.removeCurrent(); assertTrue( "query is not equal to its copy after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery, firstQuery.equals(controlQuery)); @@ -603,20 +644,19 @@ public abstract class AbstractQueryTestCase> secondQuery.queryName(secondQuery.queryName() == null ? randomAsciiOfLengthBetween(1, 30) : secondQuery.queryName() + randomAsciiOfLengthBetween(1, 10)); } - setSearchContext(randomTypes, context); + searchContext = getSearchContext(randomTypes, context); Query secondLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context); assertNotNull("toQuery should not return null", secondLuceneQuery); - assertLuceneQuery(secondQuery, secondLuceneQuery, context); - SearchContext.removeCurrent(); + assertLuceneQuery(secondQuery, secondLuceneQuery, searchContext); - assertEquals("two equivalent query builders lead to different lucene queries", - rewrite(secondLuceneQuery), rewrite(firstLuceneQuery)); + if (builderGeneratesCacheableQueries()) { + assertEquals("two equivalent query builders lead to different lucene queries", + rewrite(secondLuceneQuery), rewrite(firstLuceneQuery)); + } if (supportsBoostAndQueryName()) { secondQuery.boost(firstQuery.boost() + 1f + randomFloat()); - setSearchContext(randomTypes, context); Query thirdLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context); - SearchContext.removeCurrent(); assertNotEquals("modifying the boost doesn't affect the corresponding lucene query", rewrite(firstLuceneQuery), rewrite(thirdLuceneQuery)); } @@ -636,6 +676,10 @@ public abstract class AbstractQueryTestCase> return rewritten; } + protected boolean isCachable(QB queryBuilder) { + return true; + } + /** * Few queries allow you to set the boost and queryName on the java api, although the corresponding parser * doesn't parse them as they are not supported. This method allows to disable boost and queryName related tests for those queries. @@ -649,11 +693,11 @@ public abstract class AbstractQueryTestCase> /** * Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} * and {@link QueryShardContext}. Verifies that named queries and boost are properly handled and delegates to - * {@link #doAssertLuceneQuery(AbstractQueryBuilder, Query, QueryShardContext)} for query specific checks. + * {@link #doAssertLuceneQuery(AbstractQueryBuilder, Query, SearchContext)} for query specific checks. 
*/ - private void assertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException { + private void assertLuceneQuery(QB queryBuilder, Query query, SearchContext context) throws IOException { if (queryBuilder.queryName() != null) { - Query namedQuery = context.copyNamedQueries().get(queryBuilder.queryName()); + Query namedQuery = context.getQueryShardContext().copyNamedQueries().get(queryBuilder.queryName()); assertThat(namedQuery, equalTo(query)); } if (query != null) { @@ -677,7 +721,7 @@ public abstract class AbstractQueryTestCase> * Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} * and {@link QueryShardContext}. Contains the query specific checks to be implemented by subclasses. */ - protected abstract void doAssertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException; + protected abstract void doAssertLuceneQuery(QB queryBuilder, Query query, SearchContext context) throws IOException; protected static void assertTermOrBoostQuery(Query query, String field, String value, float fieldBoost) { if (fieldBoost != AbstractQueryBuilder.DEFAULT_BOOST) { @@ -724,47 +768,28 @@ public abstract class AbstractQueryTestCase> public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) { - QB firstQuery = createTestQueryBuilder(); - assertFalse("query is equal to null", firstQuery.equals(null)); - assertFalse("query is equal to incompatible type", firstQuery.equals("")); - assertTrue("query is not equal to self", firstQuery.equals(firstQuery)); - assertThat("same query's hashcode returns different values if called multiple times", firstQuery.hashCode(), - equalTo(firstQuery.hashCode())); - - QB secondQuery = copyQuery(firstQuery); - assertTrue("query is not equal to self", secondQuery.equals(secondQuery)); - assertTrue("query is not equal to its copy", firstQuery.equals(secondQuery)); - assertTrue("equals is not symmetric", secondQuery.equals(firstQuery)); - assertThat("query copy's hashcode is different from original hashcode", secondQuery.hashCode(), equalTo(firstQuery.hashCode())); - - QB thirdQuery = copyQuery(secondQuery); - assertTrue("query is not equal to self", thirdQuery.equals(thirdQuery)); - assertTrue("query is not equal to its copy", secondQuery.equals(thirdQuery)); - assertThat("query copy's hashcode is different from original hashcode", secondQuery.hashCode(), equalTo(thirdQuery.hashCode())); - assertTrue("equals is not transitive", firstQuery.equals(thirdQuery)); - assertThat("query copy's hashcode is different from original hashcode", firstQuery.hashCode(), equalTo(thirdQuery.hashCode())); - assertTrue("equals is not symmetric", thirdQuery.equals(secondQuery)); - assertTrue("equals is not symmetric", thirdQuery.equals(firstQuery)); - - if (randomBoolean()) { - secondQuery.queryName(secondQuery.queryName() == null ? 
randomAsciiOfLengthBetween(1, 30) : secondQuery.queryName() - + randomAsciiOfLengthBetween(1, 10)); - } else { - secondQuery.boost(firstQuery.boost() + 1f + randomFloat()); - } - assertThat("different queries should not be equal", secondQuery, not(equalTo(firstQuery))); + // TODO we only change name and boost, we should extend by any sub-test supplying a "mutate" method that randomly changes one + // aspect of the object under test + checkEqualsAndHashCode(createTestQueryBuilder(), this::copyQuery, this::changeNameOrBoost); } } + private QB changeNameOrBoost(QB original) throws IOException { + QB secondQuery = copyQuery(original); + if (randomBoolean()) { + secondQuery.queryName(secondQuery.queryName() == null ? randomAsciiOfLengthBetween(1, 30) : secondQuery.queryName() + + randomAsciiOfLengthBetween(1, 10)); + } else { + secondQuery.boost(original.boost() + 1f + randomFloat()); + } + return secondQuery; + } + //we use the streaming infra to create a copy of the query provided as argument @SuppressWarnings("unchecked") private QB copyQuery(QB query) throws IOException { - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.writeNamedWriteable(query); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), serviceHolder.namedWriteableRegistry)) { - return (QB) in.readNamedWriteable(QueryBuilder.class); - } - } + Reader reader = (Reader) serviceHolder.namedWriteableRegistry.getReader(QueryBuilder.class, query.getWriteableName()); + return copyWriteable(query, serviceHolder.namedWriteableRegistry, reader); } /** @@ -999,7 +1024,6 @@ public abstract class AbstractQueryTestCase> QueryShardContext context = createShardContext(); context.setAllowUnmappedFields(true); QB queryBuilder = createTestQueryBuilder(); - setSearchContext(randomTypes, context); // only set search context for toQuery to be more realistic queryBuilder.toQuery(context); } @@ -1007,6 +1031,23 @@ public abstract class AbstractQueryTestCase> return query; } + protected void checkWarningHeaders(String... 
messages) { + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(messages.length)); + for (String msg : messages) { + assertThat(warnings, hasItem(equalTo(msg))); + } + // "clear" current warning headers by setting a new ThreadContext + DeprecationLogger.removeThreadContext(this.threadContext); + try { + this.threadContext.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(this.threadContext); + } + private static class ServiceHolder implements Closeable { private final IndicesQueriesRegistry indicesQueriesRegistry; @@ -1020,6 +1061,7 @@ public abstract class AbstractQueryTestCase> private final BitsetFilterCache bitsetFilterCache; private final ScriptService scriptService; private final Client client; + private final long nowInMillis = randomPositiveLong(); ServiceHolder(Settings nodeSettings, Settings indexSettings, Collection> plugins, AbstractQueryTestCase testCase) throws IOException { @@ -1097,8 +1139,8 @@ public abstract class AbstractQueryTestCase> QueryShardContext createShardContext() { ClusterState state = ClusterState.builder(new ClusterName("_name")).build(); - return new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, - scriptService, indicesQueriesRegistry, this.client, null, state); + return new QueryShardContext(0, idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, + scriptService, indicesQueriesRegistry, this.client, null, state, () -> nowInMillis); } ScriptModule createScriptModule(List scriptPlugins) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index 3c5f105e4d1..b739099cff0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -18,7 +18,7 @@ package org.elasticsearch.test;/* */ import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -208,10 +208,10 @@ public class BackgroundIndexer implements AutoCloseable { } private XContentBuilder generateSource(long id, Random random) throws IOException { - int contentLength = RandomInts.randomIntBetween(random, minFieldSize, maxFieldSize); + int contentLength = RandomNumbers.randomIntBetween(random, minFieldSize, maxFieldSize); StringBuilder text = new StringBuilder(contentLength); while (text.length() < contentLength) { - int tokenLength = RandomInts.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10)); + int tokenLength = RandomNumbers.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10)); text.append(" ").append(RandomStrings.randomRealisticUnicodeOfCodepointLength(random, tokenLength)); } XContentBuilder builder = XContentFactory.smileBuilder(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index 38682239b78..3e3896dfc2c 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -29,12 +29,12 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.concurrent.CountDownLatch; import static junit.framework.TestCase.fail; @@ -42,7 +42,7 @@ import static junit.framework.TestCase.fail; public class ClusterServiceUtils { public static ClusterService createClusterService(ThreadPool threadPool) { - DiscoveryNode discoveryNode = new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + DiscoveryNode discoveryNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT); return createClusterService(threadPool, discoveryNode); } @@ -54,12 +54,12 @@ public class ClusterServiceUtils { clusterService.setLocalNode(localNode); clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToAddedNodes(ClusterChangedEvent event) { + public void connectToNodes(List addedNodes) { // skip } @Override - public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + public void disconnectFromNodes(List removedNodes) { // skip } }); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java index 16647b04a47..0ece6fad393 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java @@ -29,8 +29,6 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; @@ -206,6 +204,11 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { return finalSettings.build(); } + @Override + protected boolean addMockZenPings() { + return false; + } + protected int minExternalNodes() { return 1; } protected int maxExternalNodes() { @@ -243,7 +246,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase { protected Settings commonNodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(requiredSettings()); builder.put(NetworkModule.TRANSPORT_TYPE_KEY, randomBoolean() ? 
"netty3" : "netty4"); // run same transport / disco as external - builder.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen"); return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 0dcd671b29c..763f993ff0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -16,11 +16,12 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.annotations.TestGroup; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; import org.apache.lucene.util.IOUtils; @@ -82,7 +83,6 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -95,7 +95,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; @@ -121,10 +120,11 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.client.RandomizingClient; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.store.MockFSIndexStore; -import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.AssertingTransportInterceptor; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.hamcrest.Matchers; import org.junit.After; @@ -153,6 +153,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; @@ -165,6 +166,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BooleanSupplier; import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -180,6 +183,7 @@ import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static 
org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; @@ -434,7 +438,7 @@ public abstract class ESIntegTestCase extends ESTestCase { if (randomBoolean()) { // keep this low so we don't stall tests - builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), RandomInts.randomIntBetween(random, 1, 15) + "ms"); + builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 1, 15) + "ms"); } return builder; @@ -447,8 +451,8 @@ public abstract class ESIntegTestCase extends ESTestCase { } switch (random.nextInt(4)) { case 3: - final int maxThreadCount = RandomInts.randomIntBetween(random, 1, 4); - final int maxMergeCount = RandomInts.randomIntBetween(random, maxThreadCount, maxThreadCount + 4); + final int maxThreadCount = RandomNumbers.randomIntBetween(random, 1, 4); + final int maxMergeCount = RandomNumbers.randomIntBetween(random, maxThreadCount, maxThreadCount + 4); builder.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount); builder.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), maxThreadCount); break; @@ -459,7 +463,7 @@ public abstract class ESIntegTestCase extends ESTestCase { private static Settings.Builder setRandomIndexTranslogSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { - builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)); + builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)); } if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)); // just don't flush @@ -469,14 +473,14 @@ public abstract class ESIntegTestCase extends ESTestCase { } if (random.nextBoolean()) { - builder.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), RandomInts.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS); + builder.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS); } return builder; } private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable() { + return RandomizedContext.current().runWithPrivateRandomness(seed, new Callable() { @Override public TestCluster call() throws Exception { return buildTestCluster(scope, seed); @@ -536,12 +540,11 @@ public abstract class ESIntegTestCase extends ESTestCase { for (Discovery discovery : internalCluster().getInstances(Discovery.class)) { if (discovery instanceof ZenDiscovery) { final ZenDiscovery zenDiscovery = (ZenDiscovery) discovery; - assertBusy(new Runnable() { - @Override - public void run() { - assertThat("still having pending states: " + Strings.arrayToDelimitedString(zenDiscovery.pendingClusterStates(), "\n"), - zenDiscovery.pendingClusterStates(), emptyArray()); - } + assertBusy(() -> { + final ClusterState[] states = zenDiscovery.pendingClusterStates(); + assertThat(zenDiscovery.localNode().getName() + " still having pending states:\n" + + Stream.of(states).map(ClusterState::toString).collect(Collectors.joining("\n")), + states, emptyArray()); 
}); } } @@ -767,17 +770,14 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public void waitNoPendingTasksOnAll() throws Exception { assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); - assertBusy(new Runnable() { - @Override - public void run() { - for (Client client : clients()) { - ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); - assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); - PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get(); - assertThat("client " + client + " still has pending tasks " + pendingTasks.prettyPrint(), pendingTasks, Matchers.emptyIterable()); - clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); - assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); - } + assertBusy(() -> { + for (Client client : clients()) { + ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); + assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); + PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get(); + assertThat("client " + client + " still has pending tasks " + pendingTasks, pendingTasks, Matchers.emptyIterable()); + clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); + assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); } }); assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); @@ -878,14 +878,45 @@ public abstract class ESIntegTestCase extends ESTestCase { * @param timeout time out value to set on {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest} */ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) { - ClusterHealthResponse actionGet = client().admin().cluster() - .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet(); + return ensureColor(ClusterHealthStatus.GREEN, timeout, indices); + } + + /** + * Ensures the cluster has a yellow state via the cluster health API. + */ + public ClusterHealthStatus ensureYellow(String... indices) { + return ensureColor(ClusterHealthStatus.YELLOW, TimeValue.timeValueSeconds(30), indices); + } + + private ClusterHealthStatus ensureColor(ClusterHealthStatus clusterHealthStatus, TimeValue timeout, String... indices) { + String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); + String method = "ensure" + Strings.capitalize(color); + + ClusterHealthRequest healthRequest = Requests.clusterHealthRequest(indices) + .timeout(timeout) + .waitForStatus(clusterHealthStatus) + .waitForEvents(Priority.LANGUID) + .waitForNoRelocatingShards(true) + // We currently often use ensureGreen or ensureYellow to check whether the cluster is back in a good state after shutting down + // a node. If the node that is stopped is the master node, another node will become master and publish a cluster state where it + // is master but where the node that was stopped hasn't been removed yet from the cluster state. 
It will only subsequently + // publish a second state where the old master is removed. If the ensureGreen/ensureYellow is timed just right, it will get to + // execute before the second cluster state update removes the old master and the condition ensureGreen / ensureYellow will + // trivially hold if it held before the node was shut down. The following "waitForNodes" condition ensures that the node has + // been removed by the master so that the health check applies to the set of nodes we expect to be part of the cluster. + .waitForNodes(Integer.toString(cluster().size())); + + ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); - fail("timed out waiting for green state"); + logger.info("{} timed out, cluster state:\n{}\n{}", + method, + client().admin().cluster().prepareState().get().getState(), + client().admin().cluster().preparePendingClusterTasks().get()); + fail("timed out waiting for " + color + " state"); } - assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN)); - logger.debug("indices {} are green", indices.length == 0 ? "[_all]" : indices); + assertThat("Expected at least " + clusterHealthStatus + " but got " + actionGet.getStatus(), + actionGet.getStatus().value(), lessThanOrEqualTo(clusterHealthStatus.value())); + logger.debug("indices {} are {}", indices.length == 0 ? "[_all]" : indices, color); return actionGet.getStatus(); } @@ -908,7 +939,8 @@ public abstract class ESIntegTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(request).actionGet(); if (actionGet.isTimedOut()) { - logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); } if (status != null) { @@ -998,25 +1030,12 @@ public abstract class ESIntegTestCase extends ESTestCase { .get().isAcknowledged()); } - /** - * Ensures the cluster has a yellow state via the cluster health API. - */ - public ClusterHealthStatus ensureYellow(String... indices) { - ClusterHealthResponse actionGet = client().admin().cluster() - .health(Requests.clusterHealthRequest(indices).waitForNoRelocatingShards(true).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet(); - if (actionGet.isTimedOut()) { - logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); - assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false)); - } - logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices); - return actionGet.getStatus(); - } - /** * Prints the current cluster state as debug logging. 
*/ public void logClusterState() { - logger.debug("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.debug("cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); } /** @@ -1117,7 +1136,7 @@ public abstract class ESIntegTestCase extends ESTestCase { if (clusterHealthResponse.isTimedOut()) { ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get(); fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n" - + stateResponse.getState().prettyPrint()); + + stateResponse.getState()); } assertThat(clusterHealthResponse.isTimedOut(), is(false)); } @@ -1685,7 +1704,7 @@ public abstract class ESIntegTestCase extends ESTestCase { for (String stringAddress : stringAddresses) { URL url = new URL("http://" + stringAddress); InetAddress inetAddress = InetAddress.getByName(url.getHost()); - transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort())); + transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); } @@ -1736,9 +1755,6 @@ public abstract class ESIntegTestCase extends ESTestCase { ArrayList> mocks = new ArrayList<>(mockPlugins); // add both mock plugins - local and tcp if they are not there // we do this in case somebody overrides getMockPlugins and misses to call super - if (mockPlugins.contains(AssertingLocalTransport.TestPlugin.class) == false) { - mocks.add(AssertingLocalTransport.TestPlugin.class); - } if (mockPlugins.contains(MockTcpTransportPlugin.class) == false) { mocks.add(MockTcpTransportPlugin.class); } @@ -1750,24 +1766,9 @@ public abstract class ESIntegTestCase extends ESTestCase { } protected NodeConfigurationSource getNodeConfigSource() { - SuppressLocalMode noLocal = getAnnotation(this.getClass(), SuppressLocalMode.class); - SuppressNetworkMode noNetwork = getAnnotation(this.getClass(), SuppressNetworkMode.class); Settings.Builder networkSettings = Settings.builder(); - final boolean isNetwork; - if (noLocal != null && noNetwork != null) { - throw new IllegalStateException("Can't suppress both network and local mode"); - } else if (noLocal != null) { - if (addMockTransportService()) { - networkSettings.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME); - } - isNetwork = true; - } else { - if (addMockTransportService()) { - networkSettings.put(NetworkModule.TRANSPORT_TYPE_KEY, AssertingLocalTransport.ASSERTING_TRANSPORT_NAME); - } else { - networkSettings.put(NetworkModule.TRANSPORT_TYPE_KEY, "local"); - } - isNetwork = false; + if (addMockTransportService()) { + networkSettings.put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME); } NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @@ -1775,8 +1776,6 @@ public abstract class ESIntegTestCase extends ESTestCase { public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), - isNetwork ? 
DiscoveryModule.DISCOVERY_TYPE_SETTING.getDefault(Settings.EMPTY) : "local") .put(networkSettings.build()). put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } @@ -1795,12 +1794,9 @@ public abstract class ESIntegTestCase extends ESTestCase { @Override public Collection> transportClientPlugins() { Collection> plugins = ESIntegTestCase.this.transportClientPlugins(); - if (isNetwork && plugins.contains(MockTcpTransportPlugin.class) == false) { + if (plugins.contains(MockTcpTransportPlugin.class) == false) { plugins = new ArrayList<>(plugins); plugins.add(MockTcpTransportPlugin.class); - } else if (isNetwork == false && plugins.contains(AssertingLocalTransport.class) == false) { - plugins = new ArrayList<>(plugins); - plugins.add(AssertingLocalTransport.TestPlugin.class); } return Collections.unmodifiableCollection(plugins); } @@ -1816,6 +1812,10 @@ public abstract class ESIntegTestCase extends ESTestCase { return true; } + protected boolean addMockZenPings() { + return true; + } + /** * Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful * for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test @@ -1844,12 +1844,18 @@ public abstract class ESIntegTestCase extends ESTestCase { if (randomBoolean()) { mocks.add(MockSearchService.TestPlugin.class); } + if (randomBoolean()) { + mocks.add(AssertingTransportInterceptor.TestPlugin.class); + } } if (addMockTransportService()) { - mocks.add(AssertingLocalTransport.TestPlugin.class); mocks.add(MockTcpTransportPlugin.class); } + + if (addMockZenPings()) { + mocks.add(MockZenPing.TestPlugin.class); + } mocks.add(TestSeedPlugin.class); return Collections.unmodifiableList(mocks); } @@ -1994,6 +2000,7 @@ public abstract class ESIntegTestCase extends ESTestCase { try { INSTANCE.printTestMessage("cleaning up after"); INSTANCE.afterInternal(true); + checkStaticState(); } finally { INSTANCE = null; } @@ -2112,8 +2119,7 @@ public abstract class ESIntegTestCase extends ESTestCase { for (NodeInfo node : nodes) { if (node.getHttp() != null) { TransportAddress publishAddress = node.getHttp().address().publishAddress(); - assertEquals(1, publishAddress.uniqueAddressTypeId()); - InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address(); + InetSocketAddress address = publishAddress.address(); hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol)); } } @@ -2149,29 +2155,10 @@ public abstract class ESIntegTestCase extends ESTestCase { public @interface SuiteScopeTestCase { } - /** - * If used the test will never run in local mode. 
- */ - @Retention(RetentionPolicy.RUNTIME) - @Inherited - @Target(ElementType.TYPE) - public @interface SuppressLocalMode { - } - - /** - * If used the test will never run in network mode - */ - @Retention(RetentionPolicy.RUNTIME) - @Inherited - @Target(ElementType.TYPE) - public @interface SuppressNetworkMode { - } - public static Index resolveIndex(String index) { GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID); return new Index(index, uuid); } - } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 4916d7df2f6..9648eb5798e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -45,7 +45,9 @@ import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTcpTransportPlugin; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -53,6 +55,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -179,12 +182,20 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("discovery.type", "local") - .put("transport.type", "local") + .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(Node.NODE_DATA_SETTING.getKey(), true) .put(nodeSettings()) // allow test cases to provide their own settings or override these .build(); - Node build = new MockNode(settings, getPlugins()); + Collection> plugins = getPlugins(); + if (plugins.contains(MockTcpTransportPlugin.class) == false) { + plugins = new ArrayList<>(plugins); + plugins.add(MockTcpTransportPlugin.class); + } + if (plugins.contains(MockZenPing.TestPlugin.class) == false) { + plugins = new ArrayList<>(plugins); + plugins.add(MockZenPing.TestPlugin.class); + } + Node build = new MockNode(settings, plugins); try { build.start(); } catch (NodeValidationException e) { @@ -283,7 +294,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { BigArrays bigArrays = indexService.getBigArrays(); ThreadPool threadPool = indexService.getThreadPool(); ScriptService scriptService = node().injector().getInstance(ScriptService.class); - return new TestSearchContext(threadPool, bigArrays, scriptService, indexService); + return new TestSearchContext(threadPool, bigArrays, indexService); } /** @@ -307,7 +318,8 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster() 
.health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), + client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false)); } assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 981be92357a..350f5be0beb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -25,11 +25,15 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.status.StatusConsoleListener; +import org.apache.logging.log4j.status.StatusData; +import org.apache.logging.log4j.status.StatusLogger; import org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; @@ -43,8 +47,15 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtilsForTesting; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -100,14 +111,17 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static 
java.util.Collections.singletonList; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; /** @@ -131,6 +145,13 @@ import static org.hamcrest.Matchers.equalTo; @LuceneTestCase.SuppressReproduceLine public abstract class ESTestCase extends LuceneTestCase { + private static final AtomicInteger portGenerator = new AtomicInteger(); + + @AfterClass + public static void resetPortCounter() { + portGenerator.set(0); + } + static { System.setProperty("log4j.shutdownHookEnabled", "false"); // we can not shutdown logging when tests are running or the next test that runs within the @@ -173,6 +194,14 @@ public abstract class ESTestCase extends LuceneTestCase { } }); + /** + * Generates a new transport address using {@link TransportAddress#META_ADDRESS} with an incrementing port number. + * The port number starts at 0 and is reset after each test suite run. + */ + public static TransportAddress buildNewFakeTransportAddress() { + return new TransportAddress(TransportAddress.META_ADDRESS, portGenerator.incrementAndGet()); + } + /** * Called when a test fails, supplying the errors it generated. Not called when the test fails because assumptions are violated. */ @@ -212,11 +241,49 @@ public abstract class ESTestCase extends LuceneTestCase { @After public final void ensureCleanedUp() throws Exception { + checkStaticState(); + } + + private static final List statusData = new ArrayList<>(); + static { + // ensure that the status logger is set to the warn level so we do not miss any warnings with our Log4j usage + StatusLogger.getLogger().setLevel(Level.WARN); + // Log4j will write out status messages indicating problems with the Log4j usage to the status logger; we hook into this logger and + // assert that no such messages were written out as these would indicate a problem with our logging configuration + StatusLogger.getLogger().registerListener(new StatusConsoleListener(Level.WARN) { + + @Override + public void log(StatusData data) { + synchronized (statusData) { + statusData.add(data); + } + } + + }); + } + + // separate method so that this can be checked again after suite scoped cluster is shut down + protected static void checkStaticState() throws Exception { MockPageCacheRecycler.ensureAllPagesAreReleased(); MockBigArrays.ensureAllArraysAreReleased(); // field cache should NEVER get loaded. 
String[] entries = UninvertingReader.getUninvertedStats(); assertEquals("fieldcache must never be used, got=" + Arrays.toString(entries), 0, entries.length); + + // ensure no one changed the status logger level on us + assertThat(StatusLogger.getLogger().getLevel(), equalTo(Level.WARN)); + synchronized (statusData) { + try { + // ensure that there are no status logger messages which would indicate a problem with our Log4j usage; we map the + // StatusData instances to Strings as otherwise their toString output is useless + assertThat( + statusData.stream().map(status -> status.getMessage().getFormattedMessage()).collect(Collectors.toList()), + empty()); + } finally { + // we clear the list so that status data from other tests do not interfere with tests within the same JVM + statusData.clear(); + } + } } // this must be a separate method from other ensure checks above so suite scoped integ tests can call...TODO: fix that @@ -263,7 +330,7 @@ public abstract class ESTestCase extends LuceneTestCase { * @see #scaledRandomIntBetween(int, int) */ public static int randomIntBetween(int min, int max) { - return RandomInts.randomIntBetween(random(), min, max); + return RandomNumbers.randomIntBetween(random(), min, max); } /** @@ -731,6 +798,22 @@ public abstract class ESTestCase extends LuceneTestCase { return targetMap; } + /** + * Create a copy of an original {@link Writeable} object by running it through a {@link BytesStreamOutput} and + * reading it in again using a provided {@link Writeable.Reader}. The stream that is wrapped around the {@link StreamInput} + * potentially needs to use a {@link NamedWriteableRegistry}, so this needs to be provided too (although it can be + * empty if the object that is streamed doesn't contain any {@link NamedWriteable} objects itself). + */ + public static <T extends Writeable> T copyWriteable(T original, NamedWriteableRegistry namedWriteableRegistry, + Writeable.Reader<T> reader) throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + original.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + return reader.read(in); + } + } + } + /** * Returns true iff assertions for elasticsearch packages are enabled */ @@ -897,4 +980,5 @@ public abstract class ESTestCase extends LuceneTestCase { this.charFilter = charFilter; } } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java new file mode 100644 index 00000000000..bf1cd8132da --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.test; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + +/** + * Utility class that encapsulates standard checks and assertions around testing the equals() and hashCode() + * methods of objects that implement them. + */ +public class EqualsHashCodeTestUtils { + + private static Object[] someObjects = new Object[] { "some string", new Integer(1), new Double(1.0) }; + + /** + * A function that makes a copy of its input argument + */ + public interface CopyFunction<T> { + T copy(T t) throws IOException; + } + + /** + * A function that creates a copy of its input argument that is different from its + * input in exactly one aspect (e.g. one parameter of a class instance should change) + */ + public interface MutateFunction<T> { + T mutate(T t) throws IOException; + } + + /** + * Perform common equality and hashCode checks on the input object + * @param original the object under test + * @param copyFunction a function that creates a deep copy of the input object + */ + public static <T> void checkEqualsAndHashCode(T original, CopyFunction<T> copyFunction) { + checkEqualsAndHashCode(original, copyFunction, null); + } + + /** + * Perform common equality and hashCode checks on the input object + * @param original the object under test + * @param copyFunction a function that creates a deep copy of the input object + * @param mutationFunction a function that creates a copy of the input object that is different + * from the input in one aspect. The output of this call is used to check that it is not equal() + * to the input object + */ + public static <T> void checkEqualsAndHashCode(T original, CopyFunction<T> copyFunction, + MutateFunction<T> mutationFunction) { + try { + String objectName = original.getClass().getSimpleName(); + assertFalse(objectName + " is equal to null", original.equals(null)); + // TODO not sure how useful the following test is + assertFalse(objectName + " is equal to incompatible type", original.equals(ESTestCase.randomFrom(someObjects))); + assertTrue(objectName + " is not equal to self", original.equals(original)); + assertThat(objectName + " hashcode returns different values if called multiple times", original.hashCode(), + equalTo(original.hashCode())); + if (mutationFunction != null) { + assertThat(objectName + " mutation should not be equal to original", mutationFunction.mutate(original), + not(equalTo(original))); + } + + T copy = copyFunction.copy(original); + assertTrue(objectName + " copy is not equal to self", copy.equals(copy)); + assertTrue(objectName + " is not equal to its copy", original.equals(copy)); + assertTrue("equals is not symmetric", copy.equals(original)); + assertThat(objectName + " hashcode is different from copy's hashcode", copy.hashCode(), equalTo(original.hashCode())); + + T secondCopy = copyFunction.copy(copy); + assertTrue("second copy is not equal to self", secondCopy.equals(secondCopy)); + assertTrue("copy is not equal to its second copy", copy.equals(secondCopy)); + assertThat("second copy's hashcode is different from copy's hashcode", copy.hashCode(), equalTo(secondCopy.hashCode())); + assertTrue("equals is not transitive", original.equals(secondCopy)); + assertTrue("equals is not symmetric", secondCopy.equals(copy)); + assertTrue("equals is not symmetric", secondCopy.equals(original)); + } catch (IOException e) { + throw
new RuntimeException(e); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index adab3b70455..8e24a99b895 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -98,7 +97,7 @@ public final class ExternalTestCluster extends TestCluster { int masterAndDataNodes = 0; for (int i = 0; i < nodeInfos.getNodes().size(); i++) { NodeInfo nodeInfo = nodeInfos.getNodes().get(i); - httpAddresses[i] = ((InetSocketTransportAddress) nodeInfo.getHttp().address().publishAddress()).address(); + httpAddresses[i] = nodeInfo.getHttp().address().publishAddress().address(); if (DiscoveryNode.isDataNode(nodeInfo.getSettings())) { dataNodes++; masterAndDataNodes++; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java index f46069484d6..a5b1667a13f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java @@ -21,7 +21,6 @@ package org.elasticsearch.test; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugins.Plugin; import java.util.Arrays; @@ -31,6 +30,8 @@ public final class InternalSettingsPlugin extends Plugin { public static final Setting VERSION_CREATED = Setting.intSetting("index.version.created", 0, Property.IndexScope, Property.NodeScope); + public static final Setting PROVIDED_NAME_SETTING = + Setting.simpleString("index.provided_name",Property.IndexScope, Property.NodeScope); public static final Setting MERGE_ENABLED = Setting.boolSetting("index.merge.enabled", true, Property.IndexScope, Property.NodeScope); public static final Setting INDEX_CREATION_DATE_SETTING = @@ -38,6 +39,7 @@ public final class InternalSettingsPlugin extends Plugin { @Override public List> getSettings() { - return Arrays.asList(VERSION_CREATED, MERGE_ENABLED, INDEX_CREATION_DATE_SETTING); + return Arrays.asList(VERSION_CREATED, MERGE_ENABLED, + INDEX_CREATION_DATE_SETTING, PROVIDED_NAME_SETTING); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index aff32300221..13bbc471dde 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -21,9 +21,10 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.SysGlobals; -import 
com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.logging.log4j.Logger; import org.apache.lucene.store.StoreRateLimiting; import org.apache.lucene.util.IOUtils; @@ -31,8 +32,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; @@ -56,7 +59,6 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -64,6 +66,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -88,6 +92,8 @@ import org.elasticsearch.node.service.NodeService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.MockTransportClient; @@ -250,7 +256,7 @@ public final class InternalTestCluster extends TestCluster { boolean useDedicatedMasterNodes = randomlyAddDedicatedMasters ? 
random.nextBoolean() : false; - this.numSharedDataNodes = RandomInts.randomIntBetween(random, minNumDataNodes, maxNumDataNodes); + this.numSharedDataNodes = RandomNumbers.randomIntBetween(random, minNumDataNodes, maxNumDataNodes); assert this.numSharedDataNodes >= 0; if (numSharedDataNodes == 0) { @@ -268,7 +274,7 @@ public final class InternalTestCluster extends TestCluster { this.numSharedDedicatedMasterNodes = 0; } if (numClientNodes < 0) { - this.numSharedCoordOnlyNodes = RandomInts.randomIntBetween(random, DEFAULT_MIN_NUM_CLIENT_NODES, DEFAULT_MAX_NUM_CLIENT_NODES); + this.numSharedCoordOnlyNodes = RandomNumbers.randomIntBetween(random, DEFAULT_MIN_NUM_CLIENT_NODES, DEFAULT_MAX_NUM_CLIENT_NODES); } else { this.numSharedCoordOnlyNodes = numClientNodes; } @@ -322,14 +328,14 @@ public final class InternalTestCluster extends TestCluster { // Some tests make use of scripting quite a bit, so increase the limit for integration tests builder.put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000); if (TEST_NIGHTLY) { - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 5, 10)); - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 5, 10)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 5, 10)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 5, 10)); } else if (random.nextInt(100) <= 90) { - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 2, 5)); - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 2, 5)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 2, 5)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 2, 5)); } // always reduce this - it can make tests really slow - builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50))); + builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomNumbers.randomIntBetween(random, 20, 50))); defaultSettings = builder.build(); executor = EsExecutors.newScaling("test runner", 0, Integer.MAX_VALUE, 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName), new ThreadContext(Settings.EMPTY)); } @@ -397,7 +403,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS)); + builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomNumbers.randomIntBetween(random, 10, 30), TimeUnit.SECONDS)); } if (random.nextInt(10) == 0) { @@ -407,9 +413,9 @@ public final class 
InternalTestCluster extends TestCluster { if (random.nextBoolean()) { if (random.nextInt(10) == 0) { // do something crazy slow here - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); } else { - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); } } if (random.nextBoolean()) { @@ -418,21 +424,21 @@ public final class InternalTestCluster extends TestCluster { if (random.nextBoolean()) { if (random.nextInt(10) == 0) { // do something crazy slow here - builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); + builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); } else { - builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); + builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); } } if (random.nextBoolean()) { - builder.put(TcpTransport.PING_SCHEDULE.getKey(), RandomInts.randomIntBetween(random, 100, 2000) + "ms"); + builder.put(TcpTransport.PING_SCHEDULE.getKey(), RandomNumbers.randomIntBetween(random, 100, 2000) + "ms"); } if (random.nextBoolean()) { - builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), RandomInts.randomIntBetween(random, 0, 2000)); + builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 0, 2000)); } if (random.nextBoolean()) { - builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000)).getStringRep()); + builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomNumbers.randomIntBetween(random, 750, 10000000)).getStringRep()); } return builder.build(); @@ -1014,7 +1020,7 @@ public final class InternalTestCluster extends TestCluster { } @Override - public void beforeIndexDeletion() { + public void beforeIndexDeletion() throws IOException { // Check that the operations counter on index shard has reached 0. // The assumption here is that after a test there are no ongoing write operations. 
// test that have ongoing write operations after the test (for example because ttl is used @@ -1049,7 +1055,7 @@ public final class InternalTestCluster extends TestCluster { } } - private void assertShardIndexCounter() { + private void assertShardIndexCounter() throws IOException { final Collection nodesAndClients = nodes.values(); for (NodeAndClient nodeAndClient : nodesAndClients) { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); @@ -1063,6 +1069,24 @@ public final class InternalTestCluster extends TestCluster { } catch (Exception e) { throw new RuntimeException("unexpected error while checking for shard counters", e); } + int activeOperationsCount = indexShard.getActiveOperationsCount(); + if (activeOperationsCount > 0) { + TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager(); + DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode(); + List taskInfos = taskManager.getTasks().values().stream() + .filter(task -> task instanceof ReplicationTask) + .map(task -> task.taskInfo(localNode.getId(), true)) + .collect(Collectors.toList()); + ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList()); + XContentBuilder builder = XContentFactory.jsonBuilder() + .prettyPrint() + .startObject() + .value(response) + .endObject(); + throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " + + nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" + + builder.string()); + } } } } @@ -1198,7 +1222,7 @@ public final class InternalTestCluster extends TestCluster { public InetSocketAddress[] httpAddresses() { List addresses = new ArrayList<>(); for (HttpServerTransport httpServerTransport : getInstances(HttpServerTransport.class)) { - addresses.add(((InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress()).address()); + addresses.add(httpServerTransport.boundAddress().publishAddress().address()); } return addresses.toArray(new InetSocketAddress[addresses.size()]); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java index fe46251e3ee..7f43c9de61b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; @@ -56,7 +57,10 @@ import static junit.framework.TestCase.assertFalse; import static junit.framework.TestCase.assertTrue; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; public class OldIndexUtils { 
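An aside on the new EqualsHashCodeTestUtils introduced earlier in this patch: the contract is easiest to see in isolation. The sketch below is illustrative only; `checkEqualsAndHashCode`, `CopyFunction` and `MutateFunction` come from the new utility, while the `Point` value type and the test class name are invented for the example.

```java
package org.elasticsearch.test;

import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;

public class PointEqualsHashCodeTests extends ESTestCase {

    /** A trivial immutable value type standing in for a QueryBuilder (hypothetical, for illustration). */
    static final class Point {
        final int x;
        final int y;

        Point(int x, int y) {
            this.x = x;
            this.y = y;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            Point other = (Point) obj;
            return x == other.x && y == other.y;
        }

        @Override
        public int hashCode() {
            return 31 * x + y;
        }
    }

    public void testEqualsAndHashCode() {
        checkEqualsAndHashCode(
                new Point(randomInt(), randomInt()),
                original -> new Point(original.x, original.y),      // copy: an equal but distinct instance
                original -> new Point(original.x + 1, original.y)); // mutate: change exactly one aspect
    }
}
```

This mirrors what AbstractQueryTestCase now does with `this::copyQuery` and `this::changeNameOrBoost`: the copy function must produce an equal but distinct instance, and the mutate function must change exactly one aspect so that the inequality assertion is meaningful.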
@@ -103,10 +107,36 @@ public class OldIndexUtils { throw new IllegalStateException("Backwards index must contain exactly one cluster"); } - // the bwc scripts packs the indices under this path - Path src = list[0].resolve("nodes/0/indices/" + indexName); - assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); - copyIndex(logger, src, indexName, paths); + final Path src = getIndexDir(logger, indexName, indexFile, list[0]); + copyIndex(logger, src, src.getFileName().toString(), paths); + } + + public static Path getIndexDir( + final Logger logger, + final String indexName, + final String indexFile, + final Path dataDir) throws IOException { + final Version version = Version.fromString(indexName.substring("index-".length())); + if (version.before(Version.V_5_0_0_alpha1)) { + // the bwc scripts packs the indices under this path + Path src = dataDir.resolve("nodes/0/indices/" + indexName); + assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); + return src; + } else { + final List indexFolders = new ArrayList<>(); + try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"), + (p) -> p.getFileName().toString().startsWith("extra") == false)) { // extra FS can break this... + for (final Path path : stream) { + indexFolders.add(path); + } + } + assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolders.get(0)); + assertNotNull(indexMetaData); + assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); + assertThat(indexMetaData.getCreationVersion(), equalTo(version)); + return indexFolders.get(0); + } } public static void assertNotUpgraded(Client client, String... index) throws Exception { @@ -128,10 +158,10 @@ public class OldIndexUtils { } // randomly distribute the files from src over dests paths - public static void copyIndex(final Logger logger, final Path src, final String indexName, final Path... dests) throws IOException { + public static void copyIndex(final Logger logger, final Path src, final String folderName, final Path... 
dests) throws IOException { Path destinationDataPath = dests[randomInt(dests.length - 1)]; for (Path dest : dests) { - Path indexDir = dest.resolve(indexName); + Path indexDir = dest.resolve(folderName); assertFalse(Files.exists(indexDir)); Files.createDirectories(indexDir); } @@ -140,7 +170,7 @@ public class OldIndexUtils { public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { Path relativeDir = src.relativize(dir); for (Path dest : dests) { - Path destDir = dest.resolve(indexName).resolve(relativeDir); + Path destDir = dest.resolve(folderName).resolve(relativeDir); Files.createDirectories(destDir); } return FileVisitResult.CONTINUE; @@ -155,7 +185,7 @@ public class OldIndexUtils { } Path relativeFile = src.relativize(file); - Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile); + Path destFile = destinationDataPath.resolve(folderName).resolve(relativeFile); logger.trace("--> Moving {} to {}", relativeFile, destFile); Files.move(file, destFile); assertFalse(Files.exists(file)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 124960fe921..b960685777e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -82,7 +82,7 @@ public abstract class TestCluster implements Closeable { /** * Assertions that should run before the cluster is wiped should be called in this method */ - public void beforeIndexDeletion() { + public void beforeIndexDeletion() throws IOException { } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index c6a1f64820b..1e86f940a11 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; +import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.unit.TimeValue; @@ -37,7 +38,6 @@ import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; @@ -53,7 +53,6 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; @@ -77,11 +76,11 @@ public class TestSearchContext extends SearchContext { final Counter timeEstimateCounter = Counter.newCounter(); final QuerySearchResult queryResult = new QuerySearchResult(); 
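Back to the copyIndex change above: the method mirrors the index's directory skeleton under every data path but moves the actual files into a single randomly chosen destination, which is how an index may be spread across a node's multiple data paths. A minimal, self-contained sketch of that distribution idiom (hypothetical names, java.nio only):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Random;

// Sketch only: create the folder tree in every destination, move each file into one.
final class SpreadIndex {
    static void spread(Path src, String folderName, Random random, Path... dests) throws IOException {
        Path chosen = dests[random.nextInt(dests.length)]; // every file lands here
        Files.walkFileTree(src, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                for (Path dest : dests) { // mirror the directory skeleton everywhere
                    Files.createDirectories(dest.resolve(folderName).resolve(src.relativize(dir)));
                }
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                // exactly one copy of each file, placed in the randomly chosen data path
                Files.move(file, chosen.resolve(folderName).resolve(src.relativize(file)));
                return FileVisitResult.CONTINUE;
            }
        });
    }
}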
final QueryShardContext queryShardContext; - ScriptService scriptService; ParsedQuery originalQuery; ParsedQuery postFilter; Query query; Float minScore; + SearchTask task; ContextIndexSearcher searcher; int size; @@ -91,7 +90,7 @@ public class TestSearchContext extends SearchContext { private final long originNanoTime = System.nanoTime(); private final Map searchExtBuilders = new HashMap<>(); - public TestSearchContext(ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService, IndexService indexService) { + public TestSearchContext(ThreadPool threadPool, BigArrays bigArrays, IndexService indexService) { super(ParseFieldMatcher.STRICT); this.bigArrays = bigArrays.withCircuitBreaking(); this.indexService = indexService; @@ -99,8 +98,7 @@ public class TestSearchContext extends SearchContext { this.fixedBitSetFilterCache = indexService.cache().bitsetFilterCache(); this.threadPool = threadPool; this.indexShard = indexService.getShardOrNull(0); - this.scriptService = scriptService; - queryShardContext = indexService.newQueryShardContext(); + queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L); } public TestSearchContext(QueryShardContext queryShardContext) { @@ -111,7 +109,6 @@ public class TestSearchContext extends SearchContext { this.threadPool = null; this.fixedBitSetFilterCache = null; this.indexShard = null; - scriptService = null; this.queryShardContext = queryShardContext; } @@ -169,11 +166,6 @@ public class TestSearchContext extends SearchContext { return originNanoTime; } - @Override - protected long nowInMillisImpl() { - return 0; - } - @Override public ScrollContext scrollContext() { return null; @@ -299,11 +291,6 @@ public class TestSearchContext extends SearchContext { return null; } - @Override - public ScriptService scriptService() { - return scriptService; - } - @Override public BigArrays bigArrays() { return bigArrays; @@ -338,6 +325,11 @@ public class TestSearchContext extends SearchContext { this.terminateAfter = terminateAfter; } + @Override + public boolean lowLevelCancellation() { + return false; + } + @Override public SearchContext minimumScore(float minimumScore) { this.minScore = minimumScore; @@ -527,11 +519,6 @@ public class TestSearchContext extends SearchContext { public void keepAlive(long keepAlive) { } - @Override - public SearchLookup lookup() { - return new SearchLookup(mapperService(), fieldData(), null); - } - @Override public DfsSearchResult dfsResult() { return null; @@ -590,4 +577,18 @@ public class TestSearchContext extends SearchContext { return queryShardContext; } + @Override + public void setTask(SearchTask task) { + this.task = task; + } + + @Override + public SearchTask getTask() { + return task; + } + + @Override + public boolean isCancelled() { + return task.isCancelled(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index b393498ec89..3fd2b024a1d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.discovery.DiscoveryModule; import 
org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; @@ -39,7 +38,7 @@ import java.util.Set; public class ClusterDiscoveryConfiguration extends NodeConfigurationSource { - static Settings DEFAULT_NODE_SETTINGS = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "zen").build(); + static Settings DEFAULT_NODE_SETTINGS = Settings.EMPTY; private static final String IP_ADDR = "127.0.0.1"; final int numOfNodes; diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java new file mode 100644 index 00000000000..d5e7de1d9bf --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.discovery; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.discovery.zen.PingContextProvider; +import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * A {@link ZenPing} implementation which returns results based on a static in-memory map. This allows pinging + * to be immediate and can be used to speed up tests. + */ +public final class MockZenPing extends AbstractComponent implements ZenPing { + + /** A marker plugin used by {@link org.elasticsearch.node.MockNode} to indicate this mock zen ping should be used.
*/ + public static class TestPlugin extends Plugin {} + + static final Map> activeNodesPerCluster = ConcurrentCollections.newConcurrentMap(); + + private volatile PingContextProvider contextProvider; + + @Inject + public MockZenPing(Settings settings) { + super(settings); + } + + @Override + public void start(PingContextProvider contextProvider) { + this.contextProvider = contextProvider; + assert contextProvider != null; + boolean added = getActiveNodesForCurrentCluster().add(this); + assert added; + } + + @Override + public void ping(PingListener listener, TimeValue timeout) { + logger.info("pinging using mock zen ping"); + List responseList = getActiveNodesForCurrentCluster().stream() + .filter(p -> p != this) // remove this as pings are not expected to return the local node + .map(MockZenPing::getPingResponse) + .collect(Collectors.toList()); + listener.onPing(responseList); + } + + private ClusterName getClusterName() { + return contextProvider.clusterState().getClusterName(); + } + + private PingResponse getPingResponse() { + final ClusterState clusterState = contextProvider.clusterState(); + return new PingResponse(clusterState.nodes().getLocalNode(), clusterState.nodes().getMasterNode(), clusterState); + } + + private Set getActiveNodesForCurrentCluster() { + return activeNodesPerCluster.computeIfAbsent(getClusterName(), + clusterName -> ConcurrentCollections.newConcurrentSet()); + } + + @Override + public void close() { + boolean found = getActiveNodesForCurrentCluster().remove(this); + assert found; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java index 944ddb9b05f..6985d2dcf17 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java @@ -39,7 +39,9 @@ public class LongGCDisruption extends SingleNodeDisruption { private static final Pattern[] unsafeClasses = new Pattern[]{ // logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing - Pattern.compile("logging\\.log4j") + Pattern.compile("logging\\.log4j"), + // security manager is shared across all nodes AND it uses synced hashmaps internally + Pattern.compile("java\\.lang\\.SecurityManager") }; protected final String disruptedNode; diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java index f7094d8ae9f..de57eee6937 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java @@ -58,6 +58,14 @@ public class NetworkDisruption implements ServiceDisruptionScheme { this.networkLinkDisruptionType = networkLinkDisruptionType; } + public DisruptedLinks getDisruptedLinks() { + return disruptedLinks; + } + + public NetworkLinkDisruptionType getNetworkLinkDisruptionType() { + return networkLinkDisruptionType; + } + @Override public void applyToCluster(InternalTestCluster cluster) { this.cluster = cluster; @@ -143,6 +151,11 @@ public class NetworkDisruption implements ServiceDisruptionScheme { return (MockTransportService) cluster.getInstance(TransportService.class, node); } + @Override + public String toString() { + return "network disruption (disruption type: " +
networkLinkDisruptionType + ", disrupted links: " + disruptedLinks + ")"; + } + /** * Represents a set of nodes with connections between nodes that are to be disrupted */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 18d6939dd4d..0a426d85265 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -655,7 +655,13 @@ public class ElasticsearchAssertions { if (streamable instanceof ActionRequest) { ((ActionRequest) streamable).validate(); } - BytesReference orig = serialize(version, streamable); + BytesReference orig; + try { + orig = serialize(version, streamable); + } catch (IllegalArgumentException e) { + // Can't serialize with this version so skip this test. + return; + } StreamInput input = orig.streamInput(); if (namedWriteableRegistry != null) { input = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 8040c421dce..da8c54396df 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -31,11 +32,13 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestPath; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.junit.BeforeClass; import java.io.IOException; import java.net.URI; @@ -58,6 +61,8 @@ public class ClientYamlTestClient { //query_string params that don't need to be declared in the spec, they are supported by default private static final Set ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path"); + private static boolean loggedInit = false; + private final ClientYamlSuiteRestSpec restSpec; private final RestClient restClient; private final Version esVersion; @@ -66,34 +71,84 @@ public class ClientYamlTestClient { assert hosts.size() > 0; this.restSpec = restSpec; this.restClient = restClient; - this.esVersion = readAndCheckVersion(hosts); + Tuple versionTuple = readMasterAndMinNodeVersion(hosts.size()); + this.esVersion = versionTuple.v1(); + Version masterVersion = versionTuple.v2(); + if (false == loggedInit) { + /* This will be logged once per suite which lines up with randomized runner's dumping the output of all failing suites. It'd + * be super noisy to log this once per test. We can't log it in a @BeforeClass method because we need the class variables. 
*/ + logger.info("initializing client, minimum es version: [{}] master version: [{}] hosts: {}", esVersion, masterVersion, hosts); + loggedInit = true; + } } - private Version readAndCheckVersion(List hosts) throws IOException { + /** + * Reset {@link #loggedInit} so we log the connection setup before this suite. + */ + @BeforeClass + public static void clearLoggedInit() { + loggedInit = false; + } + + private Tuple readMasterAndMinNodeVersion(int numHosts) throws IOException { + try { + // we simply go to the _cat/nodes API and parse all versions in the cluster + Response response = restClient.performRequest("GET", "/_cat/nodes", Collections.singletonMap("h", "version,master")); + ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); + String nodesCatResponse = restTestResponse.getBodyAsString(); + String[] split = nodesCatResponse.split("\n"); + Version version = null; + Version masterVersion = null; + for (String perNode : split) { + final String[] versionAndMaster = perNode.split(" "); + assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length; + final Version currentVersion = Version.fromString(versionAndMaster[0]); + final boolean master = versionAndMaster[1].trim().equals("*"); + if (master) { + assert masterVersion == null; + masterVersion = currentVersion; + } + if (version == null) { + version = currentVersion; + } else if (version.onOrAfter(currentVersion)) { + version = currentVersion; + } + } + return new Tuple<>(version, masterVersion); + } catch (ResponseException ex) { + if (ex.getResponse().getStatusLine().getStatusCode() == 403) { + logger.warn("Fallback to simple info '/' request, _cat/nodes is not authorized"); + final Version version = readAndCheckVersion(numHosts); + return new Tuple<>(version, version); + } + throw ex; + } + } + + private Version readAndCheckVersion(int numHosts) throws IOException { ClientYamlSuiteRestApi restApi = restApi("info"); assert restApi.getPaths().size() == 1; assert restApi.getMethods().size() == 1; - - String version = null; - for (HttpHost ignored : hosts) { + Version version = null; + for (int i = 0; i < numHosts; i++) { //we don't really use the urls here, we rely on the client doing round-robin to touch all the nodes in the cluster String method = restApi.getMethods().get(0); String endpoint = restApi.getPaths().get(0); Response response = restClient.performRequest(method, endpoint); ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); + Object latestVersion = restTestResponse.evaluate("version.number"); if (latestVersion == null) { throw new RuntimeException("elasticsearch version not found in the response"); } + final Version currentVersion = Version.fromString(restTestResponse.evaluate("version.number").toString()); if (version == null) { - version = latestVersion.toString(); - } else { - if (!latestVersion.equals(version)) { - throw new IllegalArgumentException("provided nodes addresses run different elasticsearch versions"); - } + version = currentVersion; + } else if (version.onOrAfter(currentVersion)) { + version = currentVersion; } } - return Version.fromString(version); + return version; } public Version getEsVersion() { @@ -143,10 +198,7 @@ public class ClientYamlTestClient { } } - //create doesn't exist in the spec but is supported in the clients (index with op_type=create) - boolean indexCreateApi = "create".equals(apiName); - String api = indexCreateApi ? 
"index" : apiName; - ClientYamlSuiteRestApi restApi = restApi(api); + ClientYamlSuiteRestApi restApi = restApi(apiName); //divide params between ones that go within query string and ones that go within path Map pathParts = new HashMap<>(); @@ -164,10 +216,6 @@ public class ClientYamlTestClient { } } - if (indexCreateApi) { - queryStringParams.put("op_type", "create"); - } - List supportedMethods = restApi.getSupportedMethods(pathParts.keySet()); String requestMethod; StringEntity requestBody = null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 2e29721f06e..cb46c278c2b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -108,7 +108,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { for (String entry : blacklist) { this.blacklistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); } - + } @Override @@ -117,9 +117,9 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { super.afterIfFailed(errors); } - public static Iterable createParameters(int id, int count) throws IOException, ClientYamlTestParseException { + public static Iterable createParameters() throws IOException, ClientYamlTestParseException { //parse tests only if rest test group is enabled, otherwise rest tests might not even be available on file system - List restTestCandidates = collectTestCandidates(id, count); + List restTestCandidates = collectTestCandidates(); List objects = new ArrayList<>(); for (ClientYamlTestCandidate restTestCandidate : restTestCandidates) { objects.add(new Object[]{restTestCandidate}); @@ -127,7 +127,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { return objects; } - private static List collectTestCandidates(int id, int count) throws ClientYamlTestParseException, IOException { + private static List collectTestCandidates() throws ClientYamlTestParseException, IOException { List testCandidates = new ArrayList<>(); FileSystem fileSystem = getFileSystem(); // don't make a try-with, getFileSystem returns null @@ -140,12 +140,9 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { for (String api : yamlSuites.keySet()) { List yamlFiles = new ArrayList<>(yamlSuites.get(api)); for (Path yamlFile : yamlFiles) { - String key = api + yamlFile.getFileName().toString(); - if (mustExecute(key, id, count)) { - ClientYamlTestSuite restTestSuite = restTestSuiteParser.parse(api, yamlFile); - for (ClientYamlTestSection testSection : restTestSuite.getTestSections()) { - testCandidates.add(new ClientYamlTestCandidate(restTestSuite, testSection)); - } + ClientYamlTestSuite restTestSuite = restTestSuiteParser.parse(api, yamlFile); + for (ClientYamlTestSection testSection : restTestSuite.getTestSections()) { + testCandidates.add(new ClientYamlTestCandidate(restTestSuite, testSection)); } } } @@ -164,11 +161,6 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { return testCandidates; } - private static boolean mustExecute(String test, int id, int count) { - int hash = (int) (Math.abs((long)test.hashCode()) % count); - return hash == id; - } - private static String[] resolvePathsProperty(String propertyName, String defaultValue) { String property = System.getProperty(propertyName); if 
(!Strings.hasLength(property)) { @@ -267,27 +259,16 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { restTestExecutionContext.clear(); //skip test if the whole suite (yaml file) is disabled - assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getSetupSection().getSkipSection()), + assumeFalse(testCandidate.getSetupSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion())); //skip test if the whole suite (yaml file) is disabled - assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getTeardownSection().getSkipSection()), + assumeFalse(testCandidate.getTeardownSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext.esVersion())); //skip test if test section is disabled - assumeFalse(buildSkipMessage(testCandidate.getTestPath(), testCandidate.getTestSection().getSkipSection()), + assumeFalse(testCandidate.getTestSection().getSkipSection().getSkipMessage(testCandidate.getTestPath()), testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion())); } - private static String buildSkipMessage(String description, SkipSection skipSection) { - StringBuilder messageBuilder = new StringBuilder(); - if (skipSection.isVersionCheck()) { - messageBuilder.append("[").append(description).append("] skipped, reason: [").append(skipSection.getReason()).append("] "); - } else { - messageBuilder.append("[").append(description).append("] skipped, reason: features ") - .append(skipSection.getFeatures()).append(" not supported"); - } - return messageBuilder.toString(); - } - public void test() throws IOException { //let's check that there is something to run, otherwise there might be a problem with the test section if (testCandidate.getTestSection().getExecutableSections().size() == 0) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java index b6e8ad6c0f4..b6b6adfd037 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java @@ -36,16 +36,17 @@ public class ClientYamlTestSectionParser implements ClientYamlTestFragmentParser try { parser.nextToken(); testSection.setSkipSection(parseContext.parseSkipSection()); - + while ( parser.currentToken() != XContentParser.Token.END_ARRAY) { parseContext.advanceToFieldName(); testSection.addExecutableSection(parseContext.parseExecutableSection()); } - + parser.nextToken(); - assert parser.currentToken() == XContentParser.Token.END_OBJECT; + assert parser.currentToken() == XContentParser.Token.END_OBJECT : "malformed section [" + testSection.getName() + "] expected " + + XContentParser.Token.END_OBJECT + " but was " + parser.currentToken(); parser.nextToken(); - + return testSection; } catch (Exception e) { throw new ClientYamlTestParseException("Error parsing test named [" + testSection.getName() + "]", e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java index 31451dee247..b73edf7d2c6 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java @@ -70,9 +70,6 @@ public class SkipSectionParser implements ClientYamlTestFragmentParser features; private final String reason; - + private SkipSection() { this.lowerVersion = null; this.upperVersion = null; @@ -49,7 +49,6 @@ public class SkipSection { public SkipSection(String versionRange, List features, String reason) { assert features != null; - assert versionRange != null && features.isEmpty() || versionRange == null && features.isEmpty() == false; Version[] versions = parseVersionRange(versionRange); this.lowerVersion = versions[0]; this.upperVersion = versions[1]; @@ -60,7 +59,7 @@ public class SkipSection { public Version getLowerVersion() { return lowerVersion; } - + public Version getUpperVersion() { return upperVersion; } @@ -77,11 +76,10 @@ public class SkipSection { if (isEmpty()) { return false; } - if (isVersionCheck()) { - return currentVersion.onOrAfter(lowerVersion) && currentVersion.onOrBefore(upperVersion); - } else { - return Features.areAllSupported(features) == false; - } + boolean skip = lowerVersion != null && upperVersion != null && currentVersion.onOrAfter(lowerVersion) + && currentVersion.onOrBefore(upperVersion); + skip |= Features.areAllSupported(features) == false; + return skip; } public boolean isVersionCheck() { @@ -91,7 +89,7 @@ public class SkipSection { public boolean isEmpty() { return EMPTY.equals(this); } - + private Version[] parseVersionRange(String versionRange) { if (versionRange == null) { return new Version[] { null, null }; @@ -111,4 +109,16 @@ public class SkipSection { upper.isEmpty() ? Version.CURRENT : Version.fromString(upper) }; } + + public String getSkipMessage(String description) { + StringBuilder messageBuilder = new StringBuilder(); + messageBuilder.append("[").append(description).append("] skipped,"); + if (reason != null) { + messageBuilder.append(" reason: [").append(getReason()).append("]"); + } + if (features.isEmpty() == false) { + messageBuilder.append(" unsupported features ").append(getFeatures()); + } + return messageBuilder.toString(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java deleted file mode 100644 index b44e180b453..00000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.test.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.local.LocalTransport; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.function.Supplier; - -public class AssertingLocalTransport extends LocalTransport { - - public static final String ASSERTING_TRANSPORT_NAME = "asserting_local"; - - public static class TestPlugin extends Plugin implements NetworkPlugin { - - @Override - public List> getSettings() { - return Arrays.asList(ASSERTING_TRANSPORT_MIN_VERSION_KEY, ASSERTING_TRANSPORT_MAX_VERSION_KEY); - } - - @Override - public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { - return Collections.singletonMap(ASSERTING_TRANSPORT_NAME, - () -> new AssertingLocalTransport(settings, circuitBreakerService, threadPool, namedWriteableRegistry)); - } - } - - public static final Setting ASSERTING_TRANSPORT_MIN_VERSION_KEY = - new Setting<>("transport.asserting.version.min", Integer.toString(Version.CURRENT.minimumCompatibilityVersion().id), - (s) -> Version.fromId(Integer.parseInt(s)), Property.NodeScope); - public static final Setting ASSERTING_TRANSPORT_MAX_VERSION_KEY = - new Setting<>("transport.asserting.version.max", Integer.toString(Version.CURRENT.id), - (s) -> Version.fromId(Integer.parseInt(s)), Property.NodeScope); - private final Random random; - private final Version minVersion; - private final Version maxVersion; - - @Inject - public AssertingLocalTransport(Settings settings, CircuitBreakerService circuitBreakerService, ThreadPool threadPool, - NamedWriteableRegistry namedWriteableRegistry) { - super(settings, threadPool, namedWriteableRegistry, circuitBreakerService); - final long seed = ESIntegTestCase.INDEX_TEST_SEED_SETTING.get(settings); - random = new Random(seed); - minVersion = ASSERTING_TRANSPORT_MIN_VERSION_KEY.get(settings); - maxVersion = ASSERTING_TRANSPORT_MAX_VERSION_KEY.get(settings); - } - - @Override - protected void handleParsedResponse(final TransportResponse response, final TransportResponseHandler handler) { - 
ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersionBetween(random, minVersion, maxVersion), response, - namedWriteableRegistry); - super.handleParsedResponse(response, handler); - } - - @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, - TransportRequestOptions options) throws IOException, TransportException { - ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersionBetween(random, minVersion, maxVersion), request, - namedWriteableRegistry); - super.sendRequest(node, requestId, action, request, options); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index f9b7e1d3a89..dfa30874221 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,21 +20,20 @@ package org.elasticsearch.test.transport; import org.elasticsearch.Version; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.transport.TransportInterceptor; -import org.elasticsearch.transport.TransportService; - import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -43,13 +42,15 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.MockTcpTransport; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportServiceAdapter; -import org.elasticsearch.transport.local.LocalTransport; import java.io.IOException; import java.util.Arrays; @@ -57,9 +58,12 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Queue; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.atomic.AtomicBoolean; 
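For orientation at this point in the diff: the deleted AssertingLocalTransport above piggybacked assertVersionSerializable checks on the (now removed) local transport, while the new AssertingTransportInterceptor introduced further down performs the same checks as a TransportInterceptor, decorating the sender and the request handlers instead. A condensed, self-contained sketch of that decorator shape, using simplified stand-in interfaces rather than the real Elasticsearch types:

// Sketch only: wrap the real sender so every outgoing message is checked
// before being handed off unchanged.
interface Sender {
    void send(String action, Object request);
}

final class AssertingSender implements Sender {
    private final Sender delegate;

    AssertingSender(Sender delegate) {
        this.delegate = delegate;
    }

    @Override
    public void send(String action, Object request) {
        assertSerializable(request);    // check first ...
        delegate.send(action, request); // ... then delegate untouched
    }

    private static void assertSerializable(Object request) {
        // stand-in for a round trip through ElasticsearchAssertions.assertVersionSerializable(...)
        if (request == null) {
            throw new AssertionError("message failed the serialization check");
        }
    }
}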
/** * A mock transport service that allows to simulate different network topology failures. @@ -74,6 +78,7 @@ import java.util.concurrent.CopyOnWriteArrayList; */ public final class MockTransportService extends TransportService { + public static class TestPlugin extends Plugin { @Override public List> getSettings() { @@ -81,22 +86,25 @@ public final class MockTransportService extends TransportService { } } - public static MockTransportService local(Settings settings, Version version, ThreadPool threadPool) { + public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, + @Nullable ClusterSettings clusterSettings) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); - Transport transport = new LocalTransport(settings, threadPool, namedWriteableRegistry, new NoneCircuitBreakerService()) { - @Override - protected Version getVersion() { - return version; - } - }; - return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR); + final Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version); + return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, clusterSettings); } private final Transport original; - @Inject - public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor) { - super(settings, new LookupTestTransport(transport), threadPool, interceptor); + /** + * Build the service. + * + * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings + * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. + */ + public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, + @Nullable ClusterSettings clusterSettings) { + super(settings, new LookupTestTransport(transport), threadPool, interceptor, clusterSettings); this.original = transport; } @@ -137,7 +145,10 @@ public final class MockTransportService extends TransportService { * Clears the rule associated with the provided transport address.
*/ public void clearRule(TransportAddress transportAddress) { - transport().transports.remove(transportAddress); + Transport transport = transport().transports.remove(transportAddress); + if (transport instanceof ClearableTransport) { + ((ClearableTransport) transport).clearRule(); + } } /** @@ -287,7 +298,9 @@ public final class MockTransportService extends TransportService { public void addUnresponsiveRule(TransportAddress transportAddress, final TimeValue duration) { final long startTime = System.currentTimeMillis(); - addDelegate(transportAddress, new DelegateTransport(original) { + addDelegate(transportAddress, new ClearableTransport(original) { + private final Queue requestsToSendWhenCleared = new LinkedBlockingDeque(); + private boolean cleared = false; TimeValue getDelay() { return new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime)); @@ -357,7 +370,9 @@ public final class MockTransportService extends TransportService { final TransportRequest clonedRequest = reg.newRequest(); clonedRequest.readFrom(bStream.bytes().streamInput()); - threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { + Runnable runnable = new AbstractRunnable() { + AtomicBoolean requestSent = new AtomicBoolean(); + @Override public void onFailure(Exception e) { logger.debug("failed to send delayed request", e); @@ -365,9 +380,31 @@ public final class MockTransportService extends TransportService { @Override protected void doRun() throws IOException { - original.sendRequest(node, requestId, action, clonedRequest, options); + if (requestSent.compareAndSet(false, true)) { + original.sendRequest(node, requestId, action, clonedRequest, options); + } } - }); + }; + + // store the request to send it once the rule is cleared. + synchronized (this) { + if (cleared) { + runnable.run(); + } else { + requestsToSendWhenCleared.add(runnable); + threadPool.schedule(delay, ThreadPool.Names.GENERIC, runnable); + } + } + } + + + @Override + public void clearRule() { + synchronized (this) { + assert cleared == false; + cleared = true; + requestsToSendWhenCleared.forEach(Runnable::run); + } } }); } @@ -550,6 +587,23 @@ public final class MockTransportService extends TransportService { } } + /** + * The delegate transport instances defined in this class mock various kinds of disruption types. This subclass adds a method + * {@link #clearRule()} so that when the disruptions are cleared (see {@link #clearRule(TransportService)}) this gives the + * disruption a possibility to run clean-up actions. 
+ */ + public abstract static class ClearableTransport extends DelegateTransport { + + public ClearableTransport(Transport transport) { + super(transport); + } + + /** + * Called by {@link #clearRule(TransportService)} + */ + public abstract void clearRule(); + } + List activeTracers = new CopyOnWriteArrayList<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index ba831dde092..ad37a7cacb3 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -69,26 +69,31 @@ import static org.hamcrest.Matchers.notNullValue; public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected ThreadPool threadPool; + // we always use a non-alpha, non-beta version here, otherwise minimumCompatibilityVersion will be different for the two used versions + private static final Version CURRENT_VERSION = Version.fromString(String.valueOf(Version.CURRENT.major) + ".0.0"); + protected static final Version version0 = CURRENT_VERSION.minimumCompatibilityVersion(); + + private ClusterSettings clusterSettings; - protected static final Version version0 = Version.CURRENT.minimumCompatibilityVersion(); protected volatile DiscoveryNode nodeA; protected volatile MockTransportService serviceA; - protected static final Version version1 = Version.fromId(Version.CURRENT.id + 1); + protected static final Version version1 = Version.fromId(CURRENT_VERSION.id + 1); protected volatile DiscoveryNode nodeB; protected volatile MockTransportService serviceB; - protected abstract MockTransportService build(Settings settings, Version version); + protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings); @Override @Before public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(getClass().getName()); - serviceA = buildService("TS_A", version0); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + serviceA = buildService("TS_A", version0, clusterSettings); // this one supports dynamic tracer updates nodeA = new DiscoveryNode("TS_A", serviceA.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); // serviceA.setLocalNode(nodeA); - serviceB = buildService("TS_B", version1); + serviceB = buildService("TS_B", version1, null); // this one doesn't support dynamic tracer updates nodeB = new DiscoveryNode("TS_B", serviceB.boundAddress().publishAddress(), emptyMap(), emptySet(), version1); //serviceB.setLocalNode(nodeB); // wait till all nodes are properly connected and the event has been sent, so tests in this class @@ -127,14 +132,15 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.removeConnectionListener(waitForConnection); } - private MockTransportService buildService(final String name, final Version version) { + private MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings) { MockTransportService service = build( Settings.builder() .put(Node.NODE_NAME_SETTING.getKey(), name) .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .build(), - version); + version, + clusterSettings);
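One subtlety in the unresponsive-rule change above: a delayed request can now be replayed by two parties, the scheduled timer and clearRule(), so each queued runnable guards itself with an AtomicBoolean to guarantee at-most-once delivery. A stripped-down, self-contained sketch of that replay queue (hypothetical names, standard library only):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch only: each delayed action runs at most once, whether the scheduled
// delay fires first or clearRule() flushes the queue first.
final class DelayedReplay {
    private final Queue<Runnable> pending = new ConcurrentLinkedQueue<>();
    private boolean cleared = false; // guarded by 'this'

    synchronized void submit(Runnable action, ScheduledExecutorService scheduler, long delayMillis) {
        AtomicBoolean ran = new AtomicBoolean();
        Runnable once = () -> {
            if (ran.compareAndSet(false, true)) { // at-most-once guard
                action.run();
            }
        };
        if (cleared) {
            once.run(); // rule already cleared: run immediately
        } else {
            pending.add(once); // remember it for clearRule() ...
            scheduler.schedule(once, delayMillis, TimeUnit.MILLISECONDS); // ... and schedule it
        }
    }

    synchronized void clearRule() {
        cleared = true;
        pending.forEach(Runnable::run); // replay anything still queued; the guard deduplicates
    }
}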
service.acceptIncomingRequests(); return service; } @@ -581,7 +587,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { if (i % 3 == 0) { // simulate restart of nodeB serviceB.close(); - MockTransportService newService = buildService("TS_B_" + i, version1); + MockTransportService newService = buildService("TS_B_" + i, version1, null); newService.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler); serviceB = newService; nodeB = new DiscoveryNode("TS_B_" + i, "TS_B", serviceB.boundAddress().publishAddress(), emptyMap(), emptySet(), version1); @@ -863,9 +869,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { includeSettings = "test"; excludeSettings = "DOESN'T_MATCH"; } - ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - serviceA.setDynamicSettings(service); - service.applySettings(Settings.builder() + clusterSettings.applySettings(Settings.builder() .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), includeSettings) .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), excludeSettings) .build()); @@ -1411,8 +1415,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { fail("message round trip did not complete within a sensible time frame"); } - assertTrue(nodeA.getAddress().sameHost(addressA.get())); - assertTrue(nodeB.getAddress().sameHost(addressB.get())); + assertTrue(nodeA.getAddress().getAddress().equals(addressA.get().getAddress())); + assertTrue(nodeB.getAddress().getAddress().equals(addressB.get().getAddress())); } public void testBlockingIncomingRequests() throws Exception { @@ -1422,7 +1426,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .build(), - version0); + version0, + null); AtomicBoolean requestProcessed = new AtomicBoolean(); service.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, (request, channel) -> { @@ -1539,7 +1544,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .build(), - version0); + version0, + null); DiscoveryNode nodeC = new DiscoveryNode("TS_C", "TS_C", serviceC.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); serviceC.acceptIncomingRequests(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java b/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java new file mode 100644 index 00000000000..37ebebc64a6 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; + +import java.util.Collections; +import java.util.List; +import java.util.Random; + +/** + * A transport interceptor that applies {@link ElasticsearchAssertions#assertVersionSerializable(Streamable)} + * to all requests and response objects sent across the wire + */ +public final class AssertingTransportInterceptor implements TransportInterceptor { + + private final Random random; + private final NamedWriteableRegistry namedWriteableRegistry; + + public static final class TestPlugin extends Plugin implements NetworkPlugin { + + private final Settings settings; + + public TestPlugin(Settings settings) { + this.settings = settings; + } + + @Override + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry) { + return Collections.singletonList(new AssertingTransportInterceptor(settings, namedWriteableRegistry)); + } + } + + public AssertingTransportInterceptor(Settings settings, NamedWriteableRegistry namedWriteableRegistry) { + final long seed = ESIntegTestCase.INDEX_TEST_SEED_SETTING.get(settings); + random = new Random(seed); + this.namedWriteableRegistry = namedWriteableRegistry; + } + + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + TransportRequestHandler actualHandler) { + return new TransportRequestHandler() { + + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + assertVersionSerializable(request); + actualHandler.messageReceived(request, channel, task); + } + + @Override + public void messageReceived(T request, TransportChannel channel) throws Exception { + assertVersionSerializable(request); + actualHandler.messageReceived(request, channel); + } + }; + } + + private void assertVersionSerializable(Streamable streamable) { + Version version = VersionUtils.randomVersionBetween(random, Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); + ElasticsearchAssertions.assertVersionSerializable(version, streamable, namedWriteableRegistry); + + } + + @Override + public AsyncSender interceptSender(final AsyncSender sender) { + return new AsyncSender() { + @Override + public void sendRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, + final TransportResponseHandler handler) { + assertVersionSerializable(request); + sender.sendRequest(node, action, request, options, new TransportResponseHandler() { + @Override + public T newInstance() { + return handler.newInstance(); + } + + @Override + public
void handleResponse(T response) { + assertVersionSerializable(response); + handler.handleResponse(response); + } + + @Override + public void handleException(TransportException exp) { + handler.handleException(exp); + } + + @Override + public String executor() { + return handler.executor(); + } + }); + } + }; + } + + +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index a0c81cb63d7..84d0bed8c04 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CancellableThreads; @@ -184,7 +183,7 @@ public class MockTcpTransport extends TcpTransport } } }; - InetSocketAddress address = ((InetSocketTransportAddress) node.getAddress()).address(); + final InetSocketAddress address = node.getAddress().address(); // we just use a single connections configureSocket(socket); socket.connect(address, (int) TCP_CONNECT_TIMEOUT.get(settings).millis()); @@ -226,7 +225,7 @@ public class MockTcpTransport extends TcpTransport } @Override - protected void sendMessage(MockChannel mockChannel, BytesReference reference, Runnable sendListener, boolean close) throws IOException { + protected void sendMessage(MockChannel mockChannel, BytesReference reference, Runnable sendListener) throws IOException { synchronized (mockChannel) { final Socket socket = mockChannel.activeChannel; OutputStream outputStream = new BufferedOutputStream(socket.getOutputStream()); @@ -236,9 +235,6 @@ public class MockTcpTransport extends TcpTransport if (sendListener != null) { sendListener.run(); } - if (close) { - IOUtils.closeWhileHandlingException(mockChannel); - } } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java index a198ef77956..8338d5e5cfc 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java @@ -22,19 +22,30 @@ import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @SuppressWarnings({"unchecked","varargs"}) public class MockTransportClient extends TransportClient { - private static final Settings DEFAULT_SETTINGS = Settings.builder().put("transport.type.default", "local").build(); + private static final Settings DEFAULT_SETTINGS = Settings.builder().put("transport.type.default", + MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); public MockTransportClient(Settings settings, Class... 
plugins) { - super(settings, DEFAULT_SETTINGS, Arrays.asList(plugins)); + this(settings, Arrays.asList(plugins)); } public MockTransportClient(Settings settings, Collection> plugins) { - super(settings, DEFAULT_SETTINGS, plugins); + super(settings, DEFAULT_SETTINGS, addMockTransportIfMissing(plugins)); + } + + private static Collection> addMockTransportIfMissing(Collection> plugins) { + if (plugins.contains(MockTcpTransportPlugin.class)) { + return plugins; + } + plugins = new ArrayList<>(plugins); + plugins.add(MockTcpTransportPlugin.class); + return plugins; } } diff --git a/test/framework/src/main/resources/log4j2-test.properties b/test/framework/src/main/resources/log4j2-test.properties index f573cace790..828555ed52e 100644 --- a/test/framework/src/main/resources/log4j2-test.properties +++ b/test/framework/src/main/resources/log4j2-test.properties @@ -1,5 +1,3 @@ -status = error - appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout @@ -7,4 +5,3 @@ appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n rootLogger.level = ${sys:tests.es.logger.level:-info} rootLogger.appenderRef.console.ref = console - e \ No newline at end of file diff --git a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java index 51a90282b4f..d5cd304659a 100644 --- a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java +++ b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.MockTcpTransportPlugin; import java.io.IOException; import java.util.ArrayList; @@ -40,10 +41,11 @@ public class MockNodeTests extends ESTestCase { public void testComponentsMockedByMarkerPlugins() throws IOException { Settings settings = Settings.builder() // All these are required or MockNode will fail to build. 
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("transport.type", "local") + .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put("http.enabled", false) .build(); List<Class<? extends Plugin>> plugins = new ArrayList<>(); + plugins.add(MockTcpTransportPlugin.class); boolean useMockBigArrays = randomBoolean(); boolean useMockSearchService = randomBoolean(); if (useMockBigArrays) { diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java index d6cd3eea5ac..45ea62f31ea 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java @@ -33,8 +33,9 @@ import org.elasticsearch.test.TestSearchContext; public class MockSearchServiceTests extends ESTestCase { public void testAssertNoInFlightContext() { - SearchContext s = new TestSearchContext(new QueryShardContext(new IndexSettings(IndexMetaData.PROTO, Settings.EMPTY), null, null, - null, null, null, null, null, null, null)) { + final long nowInMillis = randomPositiveLong(); + SearchContext s = new TestSearchContext(new QueryShardContext(0, new IndexSettings(IndexMetaData.PROTO, Settings.EMPTY), null, null, + null, null, null, null, null, null, null, () -> nowInMillis)) { @Override public SearchShardTarget shardTarget() { return new SearchShardTarget("node", new Index("idx", "ignored"), 0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java new file mode 100644 index 00000000000..ef6d0265b59 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.test.hamcrest; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; + +public class ElasticsearchAssertionsTests extends ESTestCase { + public void testAssertVersionSerializableIsOkWithIllegalArgumentException() { + Version version = randomVersion(random()); + NamedWriteableRegistry registry = new NamedWriteableRegistry(emptyList()); + Streamable testStreamable = new TestStreamable(); + + // Should catch the exception and do nothing. + assertVersionSerializable(version, testStreamable, registry); + } + + public static class TestStreamable implements Streamable { + @Override + public void readFrom(StreamInput in) throws IOException { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IllegalArgumentException("Not supported."); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java index f5d46cdd3d6..7473e393e5c 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestSuiteParseContext; import org.elasticsearch.test.rest.yaml.parser.SkipSectionParser; import org.elasticsearch.test.rest.yaml.section.SkipSection; +import java.util.Arrays; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -108,13 +110,11 @@ public class SkipSectionParserTests extends AbstractParserTestCase { ); SkipSectionParser skipSectionParser = new SkipSectionParser(); - - try { - skipSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); - fail("Expected RestTestParseException"); - } catch (ClientYamlTestParseException e) { - assertThat(e.getMessage(), is("version or features are mutually exclusive")); - } + SkipSection parse = skipSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); + assertEquals(VersionUtils.getFirstVersion(), parse.getLowerVersion()); + assertEquals(Version.fromString("0.90.2"), parse.getUpperVersion()); + assertEquals(Arrays.asList("regex"), parse.getFeatures()); + assertEquals("Delete ignores the parent param", parse.getReason()); } public void testParseSkipSectionNoReason() throws Exception { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java index f2219816462..02995e84bd9 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java +++ 
b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java @@ -32,6 +32,109 @@ import static org.hamcrest.Matchers.containsString; * stream */ public class ClientYamlSuiteRestApiParserFailingTests extends ESTestCase { + + public void testDuplicateMethods() throws Exception { + parseAndExpectFailure("{\n" + + " \"ping\": {" + + " \"documentation\": \"http://www.elasticsearch.org/guide/\"," + + " \"methods\": [\"PUT\", \"PUT\"]," + + " \"url\": {" + + " \"path\": \"/\"," + + " \"paths\": [\"/\"]," + + " \"parts\": {" + + " }," + + " \"params\": {" + + " \"type\" : \"boolean\",\n" + + " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"" + + " }" + + " }," + + " \"body\": null" + + " }" + + "}", "Found duplicate method [PUT]"); + } + + public void testDuplicatePaths() throws Exception { + parseAndExpectFailure("{\n" + + " \"ping\": {" + + " \"documentation\": \"http://www.elasticsearch.org/guide/\"," + + " \"methods\": [\"PUT\"]," + + " \"url\": {" + + " \"path\": \"/pingone\"," + + " \"paths\": [\"/pingone\", \"/pingtwo\", \"/pingtwo\"]," + + " \"parts\": {" + + " }," + + " \"params\": {" + + " \"type\" : \"boolean\",\n" + + " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"" + + " }" + + " }," + + " \"body\": null" + + " }" + + "}", "Found duplicate path [/pingtwo]"); + } + + public void testDuplicateParts() throws Exception { + parseAndExpectFailure("{\n" + + " \"ping\": {" + + " \"documentation\": \"http://www.elasticsearch.org/guide/\"," + + " \"methods\": [\"PUT\"]," + + " \"url\": {" + + " \"path\": \"/\"," + + " \"paths\": [\"/\"]," + + " \"parts\": {" + + " \"index\": {" + + " \"type\" : \"string\",\n" + + " \"description\" : \"index part\"\n" + + " }," + + " \"type\": {" + + " \"type\" : \"string\",\n" + + " \"description\" : \"type part\"\n" + + " }," + + " \"index\": {" + + " \"type\" : \"string\",\n" + + " \"description\" : \"index parameter part\"\n" + + " }" + + " }," + + " \"params\": {" + + " \"type\" : \"boolean\",\n" + + " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"" + + " }" + + " }," + + " \"body\": null" + + " }" + + "}", "Found duplicate part [index]"); + } + + public void testDuplicateParams() throws Exception { + parseAndExpectFailure("{\n" + + " \"ping\": {" + + " \"documentation\": \"http://www.elasticsearch.org/guide/\"," + + " \"methods\": [\"PUT\"]," + + " \"url\": {" + + " \"path\": \"/\"," + + " \"paths\": [\"/\"]," + + " \"parts\": {" + + " }," + + " \"params\": {" + + " \"timeout\": {" + + " \"type\" : \"string\",\n" + + " \"description\" : \"timeout parameter\"\n" + + " }," + + " \"refresh\": {" + + " \"type\" : \"string\",\n" + + " \"description\" : \"refresh parameter\"\n" + + " }," + + " \"timeout\": {" + + " \"type\" : \"string\",\n" + + " \"description\" : \"timeout parameter again\"\n" + + " }" + + " }" + + " }," + + " \"body\": null" + + " }" + + "}", "Found duplicate param [timeout]"); + } + public void testBrokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParams() throws Exception { parseAndExpectFailure(BROKEN_SPEC_PARAMS, "Expected params field in rest api definition to contain an object"); } @@ -42,12 +145,10 @@ public class ClientYamlSuiteRestApiParserFailingTests extends ESTestCase { private void parseAndExpectFailure(String brokenJson, String expectedErrorMessage) throws Exception { 
XContentParser parser = JsonXContent.jsonXContent.createParser(brokenJson); - try { - new ClientYamlSuiteRestApiParser().parse("location", parser); - fail("Expected to fail parsing but did not happen"); - } catch (IOException e) { - assertThat(e.getMessage(), containsString(expectedErrorMessage)); - } + ClientYamlSuiteRestApiParser restApiParser = new ClientYamlSuiteRestApiParser(); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> restApiParser.parse("location", parser)); + assertThat(e.getMessage(), containsString(expectedErrorMessage)); } // see params section is broken, an inside param is missing diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java new file mode 100644 index 00000000000..c8f7b351282 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.Version; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; + +public class SkipSectionTests extends ESTestCase { + + public void testSkip() { + SkipSection section = new SkipSection("2.0.0 - 2.1.0", randomBoolean() ? Collections.emptyList() : + Arrays.asList("warnings"), "foobar"); + assertFalse(section.skip(Version.CURRENT)); + assertTrue(section.skip(Version.V_2_0_0)); + section = new SkipSection(randomBoolean() ? 
null : "2.0.0 - 2.1.0", Arrays.asList("boom"), "foobar"); + assertTrue(section.skip(Version.CURRENT)); + } + + public void testMessage() { + SkipSection section = new SkipSection("2.0.0 - 2.1.0", Arrays.asList("warnings"), "foobar"); + assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); + section = new SkipSection(null, Arrays.asList("warnings"), "foobar"); + assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); + section = new SkipSection(null, Arrays.asList("warnings"), null); + assertEquals("[FOOBAR] skipped, unsupported features [warnings]", section.getSkipMessage("FOOBAR")); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index f0b7454fe9d..7c001f910d7 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -27,12 +27,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; +import org.elasticsearch.test.discovery.MockZenPing; +import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; @@ -139,14 +141,13 @@ public class InternalTestClusterTests extends ESTestCase { NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2 * ((masterNodes ? 
InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)) .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); } @Override public Settings transportClientSettings() { return Settings.builder() - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); } }; @@ -154,12 +155,13 @@ String nodePrefix = "foobar"; Path baseDir = createTempDir(); + final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + enableHttpPipelining, nodePrefix, mockPlugins, Function.identity()); InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + enableHttpPipelining, nodePrefix, mockPlugins, Function.identity()); assertClusters(cluster0, cluster1, false); long seed = randomLong(); @@ -202,14 +204,13 @@ .put( NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local") - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") + .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .build(); } @Override public Settings transportClientSettings() { return Settings.builder() - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); } }; boolean enableHttpPipelining = randomBoolean(); @@ -217,7 +218,8 @@ Path baseDir = createTempDir(); InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class), + Function.identity()); try { cluster.beforeTest(random(), 0.0); final Map shardNodePaths = new HashMap<>(); @@ -285,17 +287,16 @@ return Settings.builder() .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes) .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local") - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") + .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0).build(); } @Override public Settings transportClientSettings() { return Settings.builder() -
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); } - }, 0, randomBoolean(), "", Collections.emptyList(), Function.identity()); + }, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class), Function.identity()); cluster.beforeTest(random(), 0.0); try { Map<DiscoveryNode.Role, Set<Path>> pathsPerRole = new HashMap<>(); diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index e6a563b3e89..7ed12d249a5 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -30,12 +31,12 @@ import java.util.Collections; public class MockTcpTransportTests extends AbstractSimpleTransportTestCase { @Override - protected MockTransportService build(Settings settings, Version version) { + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version); MockTransportService mockTransportService = new MockTransportService(Settings.EMPTY, transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR); + TransportService.NOOP_TRANSPORT_INTERCEPTOR, clusterSettings); mockTransportService.start(); return mockTransportService; }
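
Taken together, the test-framework changes above all apply one pattern: the removed "local" transport is replaced by the mock TCP transport, so each fixture must both register MockTcpTransportPlugin (plus MockZenPing.TestPlugin where mock discovery is needed) and point transport.type at MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME. The following is a minimal sketch of that pattern using only classes that appear in this patch; the helper class itself is hypothetical and not part of the diff.

import java.util.ArrayList;
import java.util.Collection;

import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.discovery.MockZenPing;
import org.elasticsearch.transport.MockTcpTransportPlugin;

/** Hypothetical helper mirroring the setup these diffs repeat in each test fixture. */
final class MockTransportSetup {

    /** Settings that select the mock TCP transport in place of the removed "local" transport. */
    static Settings mockTransportSettings() {
        return Settings.builder()
                .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME)
                .build();
    }

    /** Returns a copy of the plugin list with the mock transport and mock discovery plugins registered once. */
    static Collection<Class<? extends Plugin>> withMockPlugins(Collection<Class<? extends Plugin>> plugins) {
        Collection<Class<? extends Plugin>> result = new ArrayList<>(plugins); // defensive copy, as in MockTransportClient
        if (result.contains(MockTcpTransportPlugin.class) == false) {
            result.add(MockTcpTransportPlugin.class);
        }
        if (result.contains(MockZenPing.TestPlugin.class) == false) {
            result.add(MockZenPing.TestPlugin.class);
        }
        return result;
    }
}

Keeping the settings and the plugin list in one place means a fixture cannot select the mock transport type without also registering the plugin that provides it, which is the failure mode the MockTransportClient change guards against with addMockTransportIfMissing.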