From 16c7689355ecfe3132acd6a03d8a426d8608fbd6 Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Fri, 1 May 2015 19:03:14 +0200 Subject: [PATCH 01/21] Exclude jackson-databind dependency the jackson yaml data format pulls in the databind dependency, its important that we exclude it so we won't use any of its classes by mistake --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index a3611822d9a..432aa22744e 100644 --- a/pom.xml +++ b/pom.xml @@ -267,6 +267,12 @@ jackson-dataformat-yaml 2.5.1 compile + + + com.fasterxml.jackson.core + jackson-databind + + From c28bf3bb3f9c46ad7e20dcf0d6251db02a9e1697 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Fri, 1 May 2015 20:37:26 +0200 Subject: [PATCH 02/21] Docs: Updated elasticsearch.org links to elastic.co --- docs/community/clients.asciidoc | 10 +- docs/community/index.asciidoc | 2 +- docs/groovy-api/index.asciidoc | 4 +- docs/java-api/index.asciidoc | 2 +- docs/javascript/index.asciidoc | 138 ------------------ docs/reference/getting-started.asciidoc | 8 +- docs/reference/migration/migrate_1_0.asciidoc | 2 +- docs/reference/modules/plugins.asciidoc | 2 +- docs/reference/setup.asciidoc | 2 +- docs/reference/setup/repositories.asciidoc | 10 +- docs/reference/setup/upgrade.asciidoc | 4 +- docs/resiliency/index.asciidoc | 6 +- 12 files changed, 26 insertions(+), 164 deletions(-) delete mode 100644 docs/javascript/index.asciidoc diff --git a/docs/community/clients.asciidoc b/docs/community/clients.asciidoc index 5455440e114..e0205816ca0 100644 --- a/docs/community/clients.asciidoc +++ b/docs/community/clients.asciidoc @@ -50,13 +50,13 @@ See the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client] * https://github.com/ddnexus/flex[Flex]: Ruby Client. - + * https://github.com/printercu/elastics-rb[elastics]: Tiny client with built-in zero-downtime migrations and ActiveRecord integration. 
- + * https://github.com/toptal/chewy[chewy]: - Chewy is ODM and wrapper for official elasticsearch client - + Chewy is ODM and wrapper for official elasticsearch client + * https://github.com/ankane/searchkick[Searchkick]: Intelligent search made easy @@ -82,7 +82,7 @@ See the {client}/php-api/current/index.html[official Elasticsearch PHP client]. * https://github.com/searchbox-io/Jest[Jest]: Java Rest client. -* There is of course the http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current/index.html[native ES Java client] +* There is of course the {client}/java-api/current/index.html[native ES Java client] [[community-javascript]] === JavaScript diff --git a/docs/community/index.asciidoc b/docs/community/index.asciidoc index 88135d89563..48b2f2ad8c1 100644 --- a/docs/community/index.asciidoc +++ b/docs/community/index.asciidoc @@ -1,6 +1,6 @@ = Community Supported Clients -:client: http://www.elasticsearch.org/guide/en/elasticsearch/client +:client: http://www.elastic.co/guide/en/elasticsearch/client include::clients.asciidoc[] diff --git a/docs/groovy-api/index.asciidoc b/docs/groovy-api/index.asciidoc index 5ab4bf61318..5e06cd1f2f4 100644 --- a/docs/groovy-api/index.asciidoc +++ b/docs/groovy-api/index.asciidoc @@ -1,6 +1,6 @@ = Groovy API -:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current -:java: http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current +:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current +:java: http://www.elastic.co/guide/en/elasticsearch/client/java-api/current [preface] == Preface diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index e626fcad7bd..6145e2918d8 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -1,6 +1,6 @@ [[java-api]] = Java API -:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current +:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current [preface] == 
Preface diff --git a/docs/javascript/index.asciidoc b/docs/javascript/index.asciidoc deleted file mode 100644 index 67a2a73a2e6..00000000000 --- a/docs/javascript/index.asciidoc +++ /dev/null @@ -1,138 +0,0 @@ -= elasticsearch-js - -== Overview - -Official low-level client for Elasticsearch. Its goal is to provide common -ground for all Elasticsearch-related code in JavaScript; because of this it tries -to be opinion-free and very extendable. - -The full documentation is available at http://elasticsearch.github.io/elasticsearch-js - - -=== Getting the Node.js module - -To install the module into an existing Node.js project use npm: - -[source,sh] ------------------------------------- -npm install elasticsearch ------------------------------------- - -=== Getting the browser client - -For a browser-based projects, builds for modern browsers are available http://elasticsearch.github.io/elasticsearch-js#browser-builds[here]. Download one of the archives and extract it, inside you'll find three files, pick the one that best matches your environment: - - * elasticsearch.jquery.js - for projects that already use jQuery - * elasticsearch.angular.js - for Angular projects - * elasticsearch.js - generic build for all other projects - -Each of the library specific builds tie into the AJAX and Promise creation facilities provided by their respective libraries. This is an example of how Elasticsearch.js can be extended to provide a more opinionated approach when appropriate. - -=== Setting up the client - -Now you are ready to get busy! First thing you'll need to do is create an instance of `elasticsearch.Client`. Here are several examples of configuration parameters you can use when creating that instance. For a full list of configuration options see http://elasticsearch.github.io/elasticsearch-js/index.html#configuration[the configuration docs]. 
- -[source,javascript] ------------------------------------- -var elasticsearch = require('elasticsearch'); - -// Connect to localhost:9200 and use the default settings -var client = new elasticsearch.Client(); - -// Connect the client to two nodes, requests will be -// load-balanced between them using round-robin -var client = elasticsearch.Client({ - hosts: [ - 'elasticsearch1:9200', - 'elasticsearch2:9200' - ] -}); - -// Connect to the this host's cluster, sniff -// for the rest of the cluster right away, and -// again every 5 minutes -var client = elasticsearch.Client({ - host: 'elasticsearch1:9200', - sniffOnStart: true, - sniffInterval: 300000 -}); - -// Connect to this host using https, basic auth, -// a path prefix, and static query string values -var client = new elasticsearch.Client({ - host: 'https://user:password@elasticsearch1/search?app=blog' -}); ------------------------------------- - - -=== Setting up the client in the browser - -The params accepted by the `Client` constructor are the same in the browser versions of the client, but how you access the Client constructor is different based on the build you are using. Below is an example of instantiating a client in each build. - -[source,javascript] ------------------------------------- -// elasticsearch.js adds the elasticsearch namespace to the window -var client = elasticsearch.Client({ ... }); - -// elasticsearch.jquery.js adds the es namespace to the jQuery object -var client = jQuery.es.Client({ ... }); - -// elasticsearch.angular.js creates an elasticsearch -// module, which provides an esFactory -var app = angular.module('app', ['elasticsearch']); -app.service('es', function (esFactory) { - return esFactory({ ... }); -}); ------------------------------------- - -=== Using the client instance to make API calls. - -Once you create the client, making API calls is simple. - -[source,javascript] ------------------------------------- -// get the current status of the entire cluster. 
-// Note: params are always optional, you can just send a callback -client.cluster.health(function (err, resp) { - if (err) { - console.error(err.message); - } else { - console.dir(resp); - } -}); - -// index a document -client.index({ - index: 'blog', - type: 'post', - id: 1, - body: { - title: 'JavaScript Everywhere!', - content: 'It all started when...', - date: '2013-12-17' - } -}, function (err, resp) { - // ... -}); - -// search for documents (and also promises!!) -client.search({ - index: 'users', - size: 50, - body: { - query: { - match: { - profile: 'elasticsearch' - } - } - } -}).then(function (resp) { - var hits = resp.body.hits; -}); ------------------------------------- - -== Copyright and License - -This software is Copyright (c) 2013-2015 by Elasticsearch BV. - -This is free software, licensed under The Apache License Version 2.0. diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index d5e9adbbf8b..358c5f09ef8 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -89,7 +89,7 @@ The number of shards and replicas can be defined per index at the time the index By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index. -NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. +NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. 
You can monitor shard sizes using the <> api. With that out of the way, let's get started with the fun part... @@ -104,13 +104,13 @@ java -version echo $JAVA_HOME -------------------------------------------------- -Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elasticsearch.org/download[`www.elasticsearch.org/download`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. +Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. Let's download the Elasticsearch {version} tar as follows (Windows users should download the zip package): ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -curl -L -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz +curl -L -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz -------------------------------------------------- Then extract it as follows (Windows users should unzip the zip package): @@ -868,7 +868,7 @@ In the previous section, we skipped over a little detail called the document sco All queries in Elasticsearch trigger computation of the relevance scores. In cases where we do not need the relevance scores, Elasticsearch provides another query capability in the form of <. 
Filters are similar in concept to queries except that they are optimized for much faster execution speeds for two primary reasons: * Filters do not score so they are faster to execute than queries -* Filters can be http://www.elasticsearch.org/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries +* Filters can be http://www.elastic.co/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries To understand filters, let's first introduce the <>, which allows you to combine a query (like `match_all`, `match`, `bool`, etc.) together with a filter. As an example, let's introduce the <>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc index aca40b33efb..f6cfd4f92a9 100644 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ b/docs/reference/migration/migrate_1_0.asciidoc @@ -362,7 +362,7 @@ in the query string. === Percolator The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator, -but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elasticsearch.org/blog/percolator-redesign-blog-post/[redesigned percolator] +but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator] blog post for the reasons why the percolator has been redesigned. 
Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc index 25f01a4715e..c06fc9c6e57 100644 --- a/docs/reference/modules/plugins.asciidoc +++ b/docs/reference/modules/plugins.asciidoc @@ -26,7 +26,7 @@ plugin --install // ----------------------------------- The plugins will be -automatically downloaded in this case from `download.elasticsearch.org`, +automatically downloaded in this case from `download.elastic.co`, and in case they don't exist there, from maven (central and sonatype). Note that when the plugin is located in maven central or sonatype diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index b91d8ea17bb..f0d8fdff4d3 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -4,7 +4,7 @@ [partintro] -- This section includes information on how to setup *elasticsearch* and -get it running. If you haven't already, http://www.elasticsearch.org/download[download] it, and +get it running. If you haven't already, http://www.elastic.co/downloads[download] it, and then check the <> docs. NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`. 
diff --git a/docs/reference/setup/repositories.asciidoc b/docs/reference/setup/repositories.asciidoc index f8fe939604c..3bf693d33ea 100644 --- a/docs/reference/setup/repositories.asciidoc +++ b/docs/reference/setup/repositories.asciidoc @@ -22,14 +22,14 @@ Download and install the Public Signing Key: [source,sh] -------------------------------------------------- -wget -qO - https://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add - +wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - -------------------------------------------------- Add the repository definition to your `/etc/apt/sources.list` file: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -echo "deb http://packages.elasticsearch.org/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list +echo "deb http://packages.elastic.co/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list -------------------------------------------------- [WARNING] @@ -65,7 +65,7 @@ Download and install the public signing key: [source,sh] -------------------------------------------------- -rpm --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch +rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch -------------------------------------------------- Add the following in your `/etc/yum.repos.d/` directory @@ -75,9 +75,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo` -------------------------------------------------- [elasticsearch-{branch}] name=Elasticsearch repository for {branch}.x packages -baseurl=http://packages.elasticsearch.org/elasticsearch/{branch}/centos +baseurl=http://packages.elastic.co/elasticsearch/{branch}/centos gpgcheck=1 -gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch +gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch enabled=1 -------------------------------------------------- diff --git 
a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc index 3a87a049563..9f9e745808f 100644 --- a/docs/reference/setup/upgrade.asciidoc +++ b/docs/reference/setup/upgrade.asciidoc @@ -69,7 +69,7 @@ $ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{ [float] ==== 1.0 and later -To back up a running 1.0 or later system, it is simplest to use the snapshot feature. Complete instructions for backup and restore with snapshots are available http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-snapshots.html[here]. +To back up a running 1.0 or later system, it is simplest to use the snapshot feature. See the complete instructions for <>. [float] [[rolling-upgrades]] @@ -96,7 +96,7 @@ This syntax applies to Elasticsearch 1.0 and later: * Confirm that all shards are correctly reallocated to the remaining running nodes. -* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org: +* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elastic.co: ** Extract the zip or tarball to a new directory, usually in the same volume as the current Elasticsearch installation. Do not overwrite the existing installation, as the downloaded archive will contain a default elasticsearch.yml file and will overwrite your existing configuration. ** Copy the configuration files from the old Elasticsearch installation's config directory to the new Elasticsearch installation's config directory. Move data files from the old Elasticsesarch installation's data directory if necessary. If data files are not located within the tarball's extraction directory, they will not have to be moved. ** The simplest solution for moving from one version to another is to have a symbolic link for 'elasticsearch' that points to the currently running version. This link can be easily updated and will provide a stable access point to the most recent version. Update this symbolic link if it is being used. 
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 4618a1d94b9..d52e8804392 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -22,10 +22,10 @@ improvements throughout this page to provide the full context. If you’re interested in more on how we approach ensuring resiliency in Elasticsearch, you may be interested in Igor Motov’s recent talk -http://www.elasticsearch.org/videos/improving-elasticsearch-resiliency/[Improving Elasticsearch Resiliency]. +http://www.elastic.co/videos/improving-elasticsearch-resiliency[Improving Elasticsearch Resiliency]. You may also be interested in our blog post -http://www.elasticsearch.org/blog/resiliency-elasticsearch/[Resiliency in Elasticsearch], +http://www.elastic.co/blog/resiliency-elasticsearch[Resiliency in Elasticsearch], which details our thought processes when addressing resiliency in both Elasticsearch and the work our developers do upstream in Apache Lucene. @@ -416,7 +416,7 @@ The Snapshot/Restore API supports a number of different repository types for sto [float] === Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0) -Currently, the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0. +Currently, the https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0. 
[float] === Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0) From 09ff11812ee07a88a5ef5b551c62b19f43933795 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 1 May 2015 14:58:18 -0400 Subject: [PATCH 03/21] add debugging --- .../org/elasticsearch/bootstrap/Security.java | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 67ac531f0e7..d5910e36202 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -20,8 +20,10 @@ package org.elasticsearch.bootstrap; import com.google.common.io.ByteStreams; + import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.env.Environment; import java.io.*; @@ -29,6 +31,9 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.security.NoSuchAlgorithmException; +import java.security.Policy; +import java.security.URIParameter; /** * Initializes securitymanager with necessary permissions. @@ -45,6 +50,7 @@ class Security { * Initializes securitymanager for the environment * Can only happen once! */ + @SuppressForbidden(reason = "just debugging") static void configure(Environment environment) throws IOException { // init lucene random seed. it will use /dev/urandom where available. 
StringHelper.randomId(); @@ -54,8 +60,19 @@ class Security { } Path newConfig = processTemplate(config, environment); System.setProperty("java.security.policy", newConfig.toString()); + try { + Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(newConfig.toUri())); + System.out.println(policy.getPermissions(Security.class.getProtectionDomain())); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(); + } System.setSecurityManager(new SecurityManager()); - IOUtils.deleteFilesIgnoringExceptions(newConfig); // TODO: maybe log something if it fails? + try { + // don't hide securityexception here, it means java.io.tmpdir is not accessible! + Files.delete(newConfig); + } catch (IOException ignore) { + // e.g. virus scanner on windows + } } // package-private for testing From df1914cb21d696790875c11c79c521132f424304 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Fri, 1 May 2015 21:30:24 +0200 Subject: [PATCH 04/21] Java API docs: Removed mlt-field --- docs/java-api/query-dsl-queries.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/java-api/query-dsl-queries.asciidoc b/docs/java-api/query-dsl-queries.asciidoc index afded7d9785..92e0982d4e5 100644 --- a/docs/java-api/query-dsl-queries.asciidoc +++ b/docs/java-api/query-dsl-queries.asciidoc @@ -234,7 +234,7 @@ QueryBuilder qb = matchAllQuery(); [[mlt]] -=== More Like This (Field) Query (mlt and mlt_field) +=== More Like This Query (mlt) See: * {ref}/query-dsl-mlt-query.html[More Like This Query] From 6e6949d3f4183f223b2b74553b9400d28e3aaaed Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 1 May 2015 15:47:50 -0400 Subject: [PATCH 05/21] Add debugging when security init screws up (or at trace level if you wish) --- .../org/elasticsearch/bootstrap/Security.java | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java 
b/src/main/java/org/elasticsearch/bootstrap/Security.java index d5910e36202..49c2bccf827 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -21,9 +21,9 @@ package org.elasticsearch.bootstrap; import com.google.common.io.ByteStreams; -import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; -import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; import java.io.*; @@ -32,6 +32,7 @@ import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.security.NoSuchAlgorithmException; +import java.security.PermissionCollection; import java.security.Policy; import java.security.URIParameter; @@ -50,8 +51,8 @@ class Security { * Initializes securitymanager for the environment * Can only happen once! */ - @SuppressForbidden(reason = "just debugging") static void configure(Environment environment) throws IOException { + ESLogger log = Loggers.getLogger(Security.class); // init lucene random seed. it will use /dev/urandom where available. 
StringHelper.randomId(); InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); @@ -60,16 +61,23 @@ class Security { } Path newConfig = processTemplate(config, environment); System.setProperty("java.security.policy", newConfig.toString()); + // retrieve the parsed policy we created: its useful if something goes wrong + Policy policy = null; try { - Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(newConfig.toUri())); - System.out.println(policy.getPermissions(Security.class.getProtectionDomain())); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(); + policy = Policy.getInstance("JavaPolicy", new URIParameter(newConfig.toUri())); + } catch (NoSuchAlgorithmException impossible) { + throw new RuntimeException(impossible); } + PermissionCollection permissions = policy.getPermissions(Security.class.getProtectionDomain()); + log.trace("generated permissions: {}", permissions); + System.setSecurityManager(new SecurityManager()); try { // don't hide securityexception here, it means java.io.tmpdir is not accessible! Files.delete(newConfig); + } catch (SecurityException broken) { + log.error("unable to properly access temporary files, permissions: {}", permissions); + throw broken; } catch (IOException ignore) { // e.g. 
virus scanner on windows } From dbcdb40f68c3b85f9dda3c28a5796a7205d70079 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 1 May 2015 16:02:00 -0400 Subject: [PATCH 06/21] fix sigar policy line that cannot be really working --- src/main/resources/org/elasticsearch/bootstrap/security.policy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index ffc0032d4a0..4b9b9699e80 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -34,7 +34,7 @@ grant { // project base directory permission java.io.FilePermission "${project.basedir}${/}target${/}-", "read"; // read permission for lib sigar - permission java.io.FilePermission "${project.basedir}${/}lib/sigar{/}-", "read"; + permission java.io.FilePermission "${project.basedir}${/}lib${/}sigar${/}-", "read"; // mvn custom ./m2/repository for dependency jars permission java.io.FilePermission "${m2.repository}${/}-", "read"; From 50a785c5463e1e810d6cdaf6ab25662579a14024 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 1 May 2015 16:41:49 -0400 Subject: [PATCH 07/21] add a hack to see if this fixes windows issues --- src/main/resources/org/elasticsearch/bootstrap/security.policy | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index 4b9b9699e80..0b88aba449d 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -28,6 +28,7 @@ grant { // temporary files permission java.io.FilePermission "${java.io.tmpdir}", "read,write"; + permission java.io.FilePermission "${java.io.tmpdir}-", "read,write,delete"; permission java.io.FilePermission "${java.io.tmpdir}${/}-", 
"read,write,delete"; // paths used for running tests From 9a1f11da6ec2790da14641791a1825f6eb924445 Mon Sep 17 00:00:00 2001 From: Karel Minarik Date: Sat, 2 May 2015 12:54:22 +0200 Subject: [PATCH 08/21] Trimmed the main `elasticsearch.yml` configuration file The main `elasticsearch.yml` file mixed configuration, documentation and advice together. Due to a much improved documentation at , the content has been trimmed, and only the essential settings have been left, to prevent the urge to excessive over-configuration. Related: 8d0f1a7d123f579fc772e82ef6b9aae08f6d13fd --- config/elasticsearch.yml | 375 ++++++--------------------------------- 1 file changed, 53 insertions(+), 322 deletions(-) diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml index 3384a5ee616..b3baf765b3a 100644 --- a/config/elasticsearch.yml +++ b/config/elasticsearch.yml @@ -1,368 +1,99 @@ -##################### Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at . +# ======================== Elasticsearch Configuration ========================= # -# The installation procedure is covered at -# . +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. # -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. # -# Most of the time, these defaults are just fine for running a production -# cluster. 
If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: +# Please see the documentation for further information on configuration options: +# # -#node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. +# ---------------------------------- Cluster ----------------------------------- # -#cluster.name: elasticsearch - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: +# Use a descriptive name for your cluster: # -#node.name: "Franz Kafka" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. +# cluster.name: my-application # -# Allow this node to be eligible as a master node (enabled by default): +# ------------------------------------ Node ------------------------------------ # -#node.master: true +# Use a descriptive name for the node: # -# Allow this node to store data (enabled by default): +# node.name: node-1 # -#node.data: true - -# You can exploit these settings to design advanced cluster topologies. +# Add custom attributes to the node: # -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. 
+# node.rack: r1 # -#node.master: false -#node.data: true +# ----------------------------------- Paths ------------------------------------ # -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. +# Path to directory where to store the data (separate multiple locations by comma): # -#node.master: true -#node.data: false +# path.data: /path/to/data # -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) -# -#node.master: false -#node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as , -# , -# and -# to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -#node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -#node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See and -# -# for more information. 
- -# Set the number of shards (splits) of an index (5 by default): -# -#index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -#index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -#index.number_of_shards: 1 -#index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API () to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory containing configuration (this file and logging.yml): -# -#path.conf: /path/to/conf - -# Path to directory where to store index data allocated for this node. -# -#path.data: /path/to/data -# -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. 
For example: -# -#path.data: /path/to/data1,/path/to/data2 - # Path to log files: # -#path.logs: /path/to/logs - -# Path to where plugins are installed: +# path.logs: /path/to/logs # -#path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. +# ----------------------------------- Memory ----------------------------------- # -#plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. +# Lock the memory on startup: # -# Set this property to true to lock the memory: +# bootstrap.mlockall: true # -#bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. +# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory +# available on the system and that the owner of the process is allowed to use this limit. # -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): +# Elasticsearch performs poorly when the system is swapping the memory. # -#network.bind_host: 192.168.0.1 - -# Set the address other nodes will use to communicate with this node. 
If not -# set, it is automatically derived. It must point to an actual IP address. +# ---------------------------------- Network ----------------------------------- # -#network.publish_host: 192.168.0.1 - -# Set both 'bind_host' and 'publish_host': +# Set the bind adress to a specific IP (IPv4 or IPv6): # -#network.host: 192.168.0.1 - -# Set a custom port for the node to node communication (9300 by default): +# network.host: 192.168.0.1 # -#transport.tcp.port: 9300 - -# Enable compression for all communication between nodes (disabled by default): +# Set a custom port for HTTP: # -#transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: +# http.port: 9200 # -#http.port: 9200 - -# Set a custom allowed content length: +# For more information, see the documentation at: +# # -#http.max_content_length: 100mb - -# Disable HTTP completely: +# ---------------------------------- Gateway ----------------------------------- # -#http.enabled: false - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. - -# For more information, see -# . - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: +# Block initial recovery after a full cluster restart until N nodes are started: # -#gateway.recover_after_nodes: 1 - -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): +# gateway.recover_after_nodes: 3 # -#gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. 
Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): +# For more information, see the documentation at: +# # -#gateway.expected_nodes: 2 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: +# --------------------------------- Discovery ---------------------------------- # -# 1. During the initial recovery +# Elasticsearch nodes will find each other via multicast, by default. # -#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +# To use the unicast discovery, disable the multicast discovery: # -# 2. During adding/removing nodes, rebalancing, etc +# discovery.zen.ping.multicast.enabled: false # -#cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): +# Pass an initial list of hosts to perform discovery when new node is started: # -#indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: +# discovery.zen.ping.unicast.hosts: ["host1", "host2"] # -#indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. This should be set to a quorum/majority of -# the master-eligible nodes in the cluster. 
+# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): # -#discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: +# discovery.zen.minimum_master_nodes: 3 # -#discovery.zen.ping.timeout: 3s - -# For more information, see -# - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. +# For more information, see the documentation at: +# # -# 1. Disable multicast discovery (enabled by default): +# ---------------------------------- Various ----------------------------------- # -#discovery.zen.ping.multicast.enabled: false +# Disable starting multiple nodes on a single system: # -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: +# node.max_local_storage_nodes: 1 # -#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. +# Require explicit names when deleting indices: # -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# -# -# See -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see . - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see . - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. 
- -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s +# action.destructive_requires_name: true From db003a0b32de7dcd499f71a5d15f6accd20b2590 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 10:52:27 -0400 Subject: [PATCH 09/21] remove hack --- src/main/resources/org/elasticsearch/bootstrap/security.policy | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index 0b88aba449d..4b9b9699e80 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -28,7 +28,6 @@ grant { // temporary files permission java.io.FilePermission "${java.io.tmpdir}", "read,write"; - permission java.io.FilePermission "${java.io.tmpdir}-", "read,write,delete"; permission java.io.FilePermission "${java.io.tmpdir}${/}-", "read,write,delete"; // paths used for running tests From ff44f45af160b5ccdf0a0362c4e0215431f5e1ed Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 12:08:46 -0400 Subject: [PATCH 10/21] log this --- 
src/main/java/org/elasticsearch/bootstrap/Security.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 49c2bccf827..7f3b450cbc0 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -53,6 +53,7 @@ class Security { */ static void configure(Environment environment) throws IOException { ESLogger log = Loggers.getLogger(Security.class); + log.info("java.io.tmpdir: {}", System.getProperty("java.io.tmpdir")); // init lucene random seed. it will use /dev/urandom where available. StringHelper.randomId(); InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); From e1238c5e4c722498b5e6515a932d7cbd8018bd87 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 12:11:33 -0400 Subject: [PATCH 11/21] add 2 more x --- src/main/java/org/elasticsearch/bootstrap/Security.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 7f3b450cbc0..f0181fee243 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -71,12 +71,14 @@ class Security { } PermissionCollection permissions = policy.getPermissions(Security.class.getProtectionDomain()); log.trace("generated permissions: {}", permissions); - + log.info("java.io.tmpdir: {}", System.getProperty("java.io.tmpdir")); + System.setSecurityManager(new SecurityManager()); try { // don't hide securityexception here, it means java.io.tmpdir is not accessible! 
Files.delete(newConfig); } catch (SecurityException broken) { + log.info("java.io.tmpdir: {}", System.getProperty("java.io.tmpdir")); log.error("unable to properly access temporary files, permissions: {}", permissions); throw broken; } catch (IOException ignore) { From bdd6d9c705ad0a2176364a7dc06e7af8bcb6cba2 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 12:27:38 -0400 Subject: [PATCH 12/21] heisenbug --- src/main/java/org/elasticsearch/bootstrap/Security.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index f0181fee243..2a216626c66 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -53,7 +53,8 @@ class Security { */ static void configure(Environment environment) throws IOException { ESLogger log = Loggers.getLogger(Security.class); - log.info("java.io.tmpdir: {}", System.getProperty("java.io.tmpdir")); + //String prop = System.getProperty("java.io.tmpdir"); + //log.trace("java.io.tmpdir {}", prop); // init lucene random seed. it will use /dev/urandom where available. StringHelper.randomId(); InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); @@ -71,14 +72,12 @@ class Security { } PermissionCollection permissions = policy.getPermissions(Security.class.getProtectionDomain()); log.trace("generated permissions: {}", permissions); - log.info("java.io.tmpdir: {}", System.getProperty("java.io.tmpdir")); System.setSecurityManager(new SecurityManager()); try { // don't hide securityexception here, it means java.io.tmpdir is not accessible! 
Files.delete(newConfig); } catch (SecurityException broken) { - log.info("java.io.tmpdir: {}", System.getProperty("java.io.tmpdir")); log.error("unable to properly access temporary files, permissions: {}", permissions); throw broken; } catch (IOException ignore) { From 8c0d03c3ee1b45d3b21c184606d088aca18aa6d4 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 12:41:38 -0400 Subject: [PATCH 13/21] add a hack for windows --- .../org/elasticsearch/bootstrap/Security.java | 23 +++++-------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 2a216626c66..536ccd47908 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -31,10 +31,6 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.security.NoSuchAlgorithmException; -import java.security.PermissionCollection; -import java.security.Policy; -import java.security.URIParameter; /** * Initializes securitymanager with necessary permissions. @@ -53,8 +49,6 @@ class Security { */ static void configure(Environment environment) throws IOException { ESLogger log = Loggers.getLogger(Security.class); - //String prop = System.getProperty("java.io.tmpdir"); - //log.trace("java.io.tmpdir {}", prop); // init lucene random seed. it will use /dev/urandom where available. 
StringHelper.randomId(); InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); @@ -63,22 +57,12 @@ class Security { } Path newConfig = processTemplate(config, environment); System.setProperty("java.security.policy", newConfig.toString()); - // retrieve the parsed policy we created: its useful if something goes wrong - Policy policy = null; - try { - policy = Policy.getInstance("JavaPolicy", new URIParameter(newConfig.toUri())); - } catch (NoSuchAlgorithmException impossible) { - throw new RuntimeException(impossible); - } - PermissionCollection permissions = policy.getPermissions(Security.class.getProtectionDomain()); - log.trace("generated permissions: {}", permissions); - System.setSecurityManager(new SecurityManager()); try { // don't hide securityexception here, it means java.io.tmpdir is not accessible! Files.delete(newConfig); } catch (SecurityException broken) { - log.error("unable to properly access temporary files, permissions: {}", permissions); + log.error("unable to properly access temporary files, run with -Djava.security.debug=policy for more information"); throw broken; } catch (IOException ignore) { // e.g. 
virus scanner on windows @@ -108,6 +92,11 @@ class Security { addPath(writer, environment.configFile(), "read,readlink,write,delete"); addPath(writer, environment.logsFile(), "read,readlink,write,delete"); addPath(writer, environment.pluginsFile(), "read,readlink,write,delete"); + + // generate explicit perms for actual temp dir: + // (in case there is java.io.tmpdir sheistiness on windows) + addPath(writer, processed.getParent(), "read,readlink,write,delete"); + for (Path path : environment.dataFiles()) { addPath(writer, path, "read,readlink,write,delete"); } From 86fc8ceac71fbcfe95d6675eeb6b2ffc2cb71d41 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 14:42:06 -0400 Subject: [PATCH 14/21] simplify security rules --- .../org/elasticsearch/bootstrap/Security.java | 139 ++++++++---------- .../bootstrap/SecurityTests.java | 36 ++--- 2 files changed, 79 insertions(+), 96 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 536ccd47908..a9eedb7816a 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,18 +19,19 @@ package org.elasticsearch.bootstrap; -import com.google.common.io.ByteStreams; - import org.apache.lucene.util.StringHelper; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; import java.io.*; -import java.nio.charset.StandardCharsets; +import java.net.URI; import java.nio.file.Files; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.security.URIParameter; /** * Initializes securitymanager with necessary permissions. 
@@ -47,84 +48,74 @@ class Security { * Initializes securitymanager for the environment * Can only happen once! */ - static void configure(Environment environment) throws IOException { - ESLogger log = Loggers.getLogger(Security.class); - // init lucene random seed. it will use /dev/urandom where available. + static void configure(Environment environment) throws Exception { + // init lucene random seed. it will use /dev/urandom where available: StringHelper.randomId(); - InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); - if (config == null) { - throw new NoSuchFileException(POLICY_RESOURCE); - } - Path newConfig = processTemplate(config, environment); - System.setProperty("java.security.policy", newConfig.toString()); + + // enable security policy: union of template and environment-based paths. + URI template = Security.class.getResource(POLICY_RESOURCE).toURI(); + Policy.setPolicy(new ESPolicy(template, createPermissions(environment))); + + // enable security manager System.setSecurityManager(new SecurityManager()); - try { - // don't hide securityexception here, it means java.io.tmpdir is not accessible! - Files.delete(newConfig); - } catch (SecurityException broken) { - log.error("unable to properly access temporary files, run with -Djava.security.debug=policy for more information"); - throw broken; - } catch (IOException ignore) { - // e.g. virus scanner on windows - } + + // do some basic tests + selfTest(); } - - // package-private for testing - static Path processTemplate(InputStream template, Environment environment) throws IOException { - Path processed = Files.createTempFile(null, null); - try (OutputStream output = new BufferedOutputStream(Files.newOutputStream(processed))) { - // copy the template as-is. 
- try (InputStream in = new BufferedInputStream(template)) { - ByteStreams.copy(in, output); - } - // all policy files are UTF-8: - // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html - try (Writer writer = new OutputStreamWriter(output, StandardCharsets.UTF_8)) { - writer.write(System.lineSeparator()); - writer.write("grant {"); - writer.write(System.lineSeparator()); - - // add permissions for all configured paths. - // TODO: improve test infra so we can reduce permissions where read/write - // is not really needed... - addPath(writer, environment.homeFile(), "read,readlink,write,delete"); - addPath(writer, environment.configFile(), "read,readlink,write,delete"); - addPath(writer, environment.logsFile(), "read,readlink,write,delete"); - addPath(writer, environment.pluginsFile(), "read,readlink,write,delete"); - - // generate explicit perms for actual temp dir: - // (in case there is java.io.tmpdir sheistiness on windows) - addPath(writer, processed.getParent(), "read,readlink,write,delete"); - - for (Path path : environment.dataFiles()) { - addPath(writer, path, "read,readlink,write,delete"); - } - for (Path path : environment.dataWithClusterFiles()) { - addPath(writer, path, "read,readlink,write,delete"); - } - - writer.write("};"); - writer.write(System.lineSeparator()); - } + /** returns dynamic Permissions to configured paths */ + static Permissions createPermissions(Environment environment) throws IOException { + // TODO: improve test infra so we can reduce permissions where read/write + // is not really needed... 
+ Permissions policy = new Permissions(); + addPath(policy, environment.homeFile(), "read,readlink,write,delete"); + addPath(policy, environment.configFile(), "read,readlink,write,delete"); + addPath(policy, environment.logsFile(), "read,readlink,write,delete"); + addPath(policy, environment.pluginsFile(), "read,readlink,write,delete"); + for (Path path : environment.dataFiles()) { + addPath(policy, path, "read,readlink,write,delete"); } - return processed; + for (Path path : environment.dataWithClusterFiles()) { + addPath(policy, path, "read,readlink,write,delete"); + } + + return policy; } - static void addPath(Writer writer, Path path, String permissions) throws IOException { + /** Add access to path (and all files underneath it */ + static void addPath(Permissions policy, Path path, String permissions) throws IOException { // paths may not exist yet Files.createDirectories(path); // add each path twice: once for itself, again for files underneath it - writer.write("permission java.io.FilePermission \"" + encode(path) + "\", \"" + permissions + "\";"); - writer.write(System.lineSeparator()); - writer.write("permission java.io.FilePermission \"" + encode(path) + "${/}-\", \"" + permissions + "\";"); - writer.write(System.lineSeparator()); + policy.add(new FilePermission(path.toString(), permissions)); + policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); } - - // Any backslashes in paths must be escaped, because it is the escape character when parsing. - // See "Note Regarding File Path Specifications on Windows Systems". 
- // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html - static String encode(Path path) { - return path.toString().replace("\\", "\\\\"); + + /** Simple checks that everything is ok */ + static void selfTest() { + // check we can manipulate temporary files + try { + Files.delete(Files.createTempFile(null, null)); + } catch (IOException ignored) { + // potentially virus scanner + } catch (SecurityException problem) { + throw new SecurityException("Security misconfiguration: cannot access java.io.tmpdir", problem); + } + } + + /** custom policy for union of static and dynamic permissions */ + static class ESPolicy extends Policy { + final Policy template; + final PermissionCollection dynamic; + + ESPolicy(URI template, PermissionCollection dynamic) throws Exception { + this.template = Policy.getInstance("JavaPolicy", new URIParameter(template)); + this.dynamic = dynamic; + } + + @Override + public boolean implies(ProtectionDomain domain, Permission permission) { + return template.implies(domain, permission) || dynamic.implies(permission); + } } } diff --git a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java index 4c2ddcd47eb..edbcafdddbd 100644 --- a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java +++ b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -24,12 +24,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ElasticsearchTestCase; -import java.io.ByteArrayInputStream; import java.io.FilePermission; import java.nio.file.Path; -import java.security.Policy; -import java.security.ProtectionDomain; -import java.security.URIParameter; +import java.security.Permissions; public class SecurityTests extends ElasticsearchTestCase { @@ -42,17 +39,15 @@ public class SecurityTests extends ElasticsearchTestCase { settingsBuilder.put("path.home", esHome.toString()); 
Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); - Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); + Environment environment = new Environment(settings); + Permissions permissions = Security.createPermissions(environment); - ProtectionDomain domain = getClass().getProtectionDomain(); - Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); // the fake es home - assertTrue(policy.implies(domain, new FilePermission(esHome.toString(), "read"))); + assertTrue(permissions.implies(new FilePermission(esHome.toString(), "read"))); // its parent - assertFalse(policy.implies(domain, new FilePermission(path.toString(), "read"))); + assertFalse(permissions.implies(new FilePermission(path.toString(), "read"))); // some other sibling - assertFalse(policy.implies(domain, new FilePermission(path.resolve("other").toString(), "read"))); + assertFalse(permissions.implies(new FilePermission(path.resolve("other").toString(), "read"))); } /** test generated permissions for all configured paths */ @@ -67,29 +62,26 @@ public class SecurityTests extends ElasticsearchTestCase { settingsBuilder.put("path.logs", path.resolve("logs").toString()); Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); - Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); - - ProtectionDomain domain = getClass().getProtectionDomain(); - Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); + Environment environment = new Environment(settings); + Permissions permissions = Security.createPermissions(environment); // check that all directories got permissions: // homefile: this is needed unless we break out rules for "lib" dir. 
// TODO: make read-only - assertTrue(policy.implies(domain, new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete"))); // config file // TODO: make read-only - assertTrue(policy.implies(domain, new FilePermission(environment.configFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.configFile().toString(), "read,readlink,write,delete"))); // plugins: r/w, TODO: can this be minimized? - assertTrue(policy.implies(domain, new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete"))); // data paths: r/w for (Path dataPath : environment.dataFiles()) { - assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); } for (Path dataPath : environment.dataWithClusterFiles()) { - assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); } // logs: r/w - assertTrue(policy.implies(domain, new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"))); } } From fc54ff5f10d27291d2cd8734b411936f413ffd03 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sat, 2 May 2015 15:19:01 -0400 Subject: [PATCH 15/21] remove now-unnecessary test permission --- src/main/resources/org/elasticsearch/bootstrap/security.policy | 1 - 1 file changed, 1 deletion(-) diff --git 
a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index 4b9b9699e80..027e4bd3ea5 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -86,7 +86,6 @@ grant { // needed for testing access rules etc permission java.lang.RuntimePermission "createSecurityManager"; - permission java.security.SecurityPermission "createPolicy.JavaPolicy"; // reflection hacks: // needed for Striped64 (what is this doing), also enables unmap hack From 3a89b990ead0600458ed58035359ae4738b6f632 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 3 May 2015 23:37:05 -0400 Subject: [PATCH 16/21] remove another unnecessary permission --- src/main/resources/org/elasticsearch/bootstrap/security.policy | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index 027e4bd3ea5..438fa87d333 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -84,9 +84,6 @@ grant { // needed for natives calls permission java.lang.RuntimePermission "loadLibrary.*"; - // needed for testing access rules etc - permission java.lang.RuntimePermission "createSecurityManager"; - // reflection hacks: // needed for Striped64 (what is this doing), also enables unmap hack permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; From b72f27a410fb8e4b21cdedd7485b317add305597 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 24 Apr 2015 20:59:22 +0200 Subject: [PATCH 17/21] Core: Cut over to the Lucene filter cache. This removes Elasticsearch's filter cache and uses Lucene's instead. 
It has some implications: - custom cache keys (`_cache_key`) are unsupported - decisions are made internally and can't be overridden by users (`_cache`) - not only filters can be cached but also all queries that do not need scores - parent/child queries can now be cached, however cached entries are only valid for the current top-level reader so in practice it will likely only be used on read-only indices - the cache deduplicates filters, which plays nicer with large keys (eg. `terms`) - better stats: we already had ram usage and evictions, but now also hit count, miss count, lookup count, number of cached doc id sets and current number of doc id sets in the cache - dynamically changing the filter cache size is not supported anymore Internally, an important change is that it removes the NoCacheFilter infrastructure in favour of making Query.rewrite specializing the query for the current reader so that it will only be cached on this reader (look for IndexCacheableQuery). Note that consuming filters with the query API (createWeight/scorer) instead of the filter API (getDocIdSet) is important for parent/child queries because otherwise a QueryWrapperFilter(ParentQuery) would run the wrapped query per segment while relations might be cross segments. 
--- dev-tools/forbidden/core-signatures.txt | 3 - .../cluster/update-settings.asciidoc | 3 - docs/reference/migration/migrate_2_0.asciidoc | 6 + docs/reference/query-dsl/filters.asciidoc | 85 +--- .../query-dsl/filters/and-filter.asciidoc | 37 -- .../filters/geo-bounding-box-filter.asciidoc | 8 - .../filters/geo-distance-filter.asciidoc | 8 - .../filters/geo-polygon-filter.asciidoc | 8 - .../filters/geo-shape-filter.asciidoc | 9 - .../filters/geohash-cell-filter.asciidoc | 7 - .../filters/has-child-filter.asciidoc | 6 - .../filters/has-parent-filter.asciidoc | 6 - .../query-dsl/filters/nested-filter.asciidoc | 8 +- .../query-dsl/filters/not-filter.asciidoc | 30 -- .../query-dsl/filters/or-filter.asciidoc | 33 -- .../query-dsl/filters/prefix-filter.asciidoc | 19 - .../query-dsl/filters/query-filter.asciidoc | 31 -- .../query-dsl/filters/range-filter.asciidoc | 8 - .../query-dsl/filters/regexp-filter.asciidoc | 4 +- .../query-dsl/filters/script-filter.asciidoc | 8 - .../query-dsl/filters/term-filter.asciidoc | 19 - .../query-dsl/filters/terms-filter.asciidoc | 14 +- rest-api-spec/api/indices.clear_cache.json | 4 - .../CustomPostingsHighlighter.java | 5 +- .../cluster/stats/ClusterStatsIndices.java | 1 + .../cache/clear/ClearIndicesCacheRequest.java | 12 - .../ClearIndicesCacheRequestBuilder.java | 5 - .../clear/ShardClearIndicesCacheRequest.java | 8 - .../TransportClearIndicesCacheAction.java | 4 - .../ClusterDynamicSettingsModule.java | 4 - .../common/lucene/IndexCacheableQuery.java | 74 +++ .../common/lucene/ShardCoreKeyMap.java | 109 +++++ .../common/lucene/docset/DocIdSets.java | 44 +- .../common/lucene/search/CachedFilter.java | 32 -- .../lucene/search/FilteredCollector.java | 8 +- .../common/lucene/search/NoCacheFilter.java | 79 ---- .../common/lucene/search/NoCacheQuery.java | 36 -- .../common/lucene/search/Queries.java | 31 +- .../lucene/search/ResolvableFilter.java | 11 + .../function/FiltersFunctionScoreQuery.java | 18 +- 
.../org/elasticsearch/index/IndexService.java | 2 +- .../index/aliases/IndexAliasesService.java | 4 +- .../elasticsearch/index/cache/IndexCache.java | 9 +- .../index/cache/bitset/BitsetFilterCache.java | 2 - .../index/cache/filter/FilterCache.java | 18 +- .../index/cache/filter/FilterCacheModule.java | 4 +- .../index/cache/filter/FilterCacheStats.java | 95 +++- .../index/cache/filter/ShardFilterCache.java | 34 +- .../cache/filter/index/IndexFilterCache.java | 63 +++ .../cache/filter/none/NoneFilterCache.java | 29 +- .../filter/weighted/WeightedFilterCache.java | 277 ----------- .../index/cache/query/ShardQueryCache.java | 6 +- .../index/engine/EngineConfig.java | 26 +- .../index/engine/EngineSearcherFactory.java | 4 +- .../index/engine/InternalEngine.java | 8 +- .../index/mapper/DocumentMapper.java | 24 +- .../index/mapper/MapperService.java | 15 +- .../mapper/core/AbstractFieldMapper.java | 12 +- .../index/mapper/core/BooleanFieldMapper.java | 4 +- .../index/mapper/core/ByteFieldMapper.java | 6 +- .../index/mapper/core/DateFieldMapper.java | 11 +- .../index/mapper/core/DoubleFieldMapper.java | 8 +- .../index/mapper/core/FloatFieldMapper.java | 6 +- .../index/mapper/core/IntegerFieldMapper.java | 6 +- .../index/mapper/core/LongFieldMapper.java | 6 +- .../index/mapper/core/ShortFieldMapper.java | 8 +- .../index/mapper/internal/IdFieldMapper.java | 10 +- .../mapper/internal/ParentFieldMapper.java | 9 +- .../mapper/internal/TypeFieldMapper.java | 8 +- .../index/mapper/ip/IpFieldMapper.java | 6 +- .../index/mapper/object/ObjectMapper.java | 4 +- .../percolator/PercolatorQueriesRegistry.java | 12 +- .../index/query/AndFilterBuilder.java | 22 - .../index/query/AndFilterParser.java | 20 +- .../index/query/BoolFilterBuilder.java | 22 - .../index/query/BoolFilterParser.java | 20 +- .../index/query/ConstantScoreQueryParser.java | 15 +- .../index/query/ExistsFilterParser.java | 7 +- .../index/query/FQueryFilterParser.java | 17 +- .../index/query/FilterBuilder.java | 1 - 
.../index/query/FilteredQueryParser.java | 15 +- .../query/GeoBoundingBoxFilterBuilder.java | 22 - .../query/GeoBoundingBoxFilterParser.java | 15 +- .../index/query/GeoDistanceFilterBuilder.java | 22 - .../index/query/GeoDistanceFilterParser.java | 13 +- .../query/GeoDistanceRangeFilterBuilder.java | 22 - .../query/GeoDistanceRangeFilterParser.java | 13 +- .../index/query/GeoPolygonFilterBuilder.java | 23 +- .../index/query/GeoPolygonFilterParser.java | 13 +- .../index/query/GeoShapeFilterBuilder.java | 31 -- .../index/query/GeoShapeFilterParser.java | 18 +- .../index/query/GeohashCellFilter.java | 41 +- .../index/query/HasChildFilterParser.java | 16 +- .../index/query/HasChildQueryParser.java | 7 +- .../index/query/HasParentFilterBuilder.java | 14 - .../index/query/HasParentFilterParser.java | 12 +- .../index/query/HasParentQueryParser.java | 10 +- .../index/query/IdsFilterParser.java | 3 +- .../index/query/IndexQueryParserService.java | 9 - .../index/query/MissingFilterParser.java | 16 +- .../index/query/NestedFilterBuilder.java | 21 - .../index/query/NestedFilterParser.java | 16 +- .../index/query/NotFilterBuilder.java | 13 - .../index/query/NotFilterParser.java | 17 +- .../index/query/OrFilterBuilder.java | 22 - .../index/query/OrFilterParser.java | 18 +- .../index/query/PrefixFilterBuilder.java | 22 - .../index/query/PrefixFilterParser.java | 15 +- .../index/query/QueryFilterBuilder.java | 15 +- .../index/query/QueryFilterParser.java | 4 +- .../index/query/QueryParseContext.java | 116 +---- .../index/query/RangeFilterBuilder.java | 22 - .../index/query/RangeFilterParser.java | 17 +- .../index/query/RegexpFilterBuilder.java | 21 - .../index/query/RegexpFilterParser.java | 18 +- .../index/query/ScriptFilterBuilder.java | 22 - .../index/query/ScriptFilterParser.java | 21 +- .../index/query/TermFilterBuilder.java | 22 - .../index/query/TermFilterParser.java | 24 +- .../index/query/TermsFilterBuilder.java | 22 - .../index/query/TermsFilterParser.java | 17 +- 
.../index/query/TermsLookupFilterBuilder.java | 18 - .../index/query/TopChildrenQueryParser.java | 6 +- .../index/query/TypeFilterParser.java | 6 +- .../child/ChildrenConstantScoreQuery.java | 50 +- .../index/search/child/ChildrenQuery.java | 63 +-- .../child/CustomQueryWrappingFilter.java | 136 ------ .../child/ParentConstantScoreQuery.java | 50 +- .../index/search/child/ParentIdsFilter.java | 10 +- .../index/search/child/ParentQuery.java | 113 +++-- .../index/search/child/TopChildrenQuery.java | 65 +-- .../geo/IndexedGeoBoundingBoxFilter.java | 6 +- .../elasticsearch/index/shard/IndexShard.java | 6 +- .../indices/NodeIndicesStats.java | 1 + .../cache/filter/IndicesFilterCache.java | 432 +++++++++++------- .../MultiDocumentPercolatorIndex.java | 4 +- .../percolator/PercolateContext.java | 5 - .../percolator/PercolatorService.java | 4 +- .../clear/RestClearIndicesCacheAction.java | 4 - .../elasticsearch/search/SearchService.java | 9 +- .../search/aggregations/AggregationPhase.java | 11 +- .../bucket/children/ChildrenParser.java | 4 +- .../children/ParentToChildrenAggregator.java | 45 +- .../bucket/filter/FilterAggregator.java | 11 +- .../bucket/filters/FiltersAggregator.java | 32 +- .../bucket/nested/NestedAggregator.java | 11 +- .../bucket/nested/NestedParser.java | 2 +- .../search/dfs/CachedDfSource.java | 5 +- .../search/fetch/FetchPhase.java | 2 +- .../search/fetch/FetchSubPhase.java | 13 - .../fetch/innerhits/InnerHitsContext.java | 27 +- .../search/highlight/PostingsHighlighter.java | 2 +- .../search/internal/ContextIndexSearcher.java | 10 +- .../search/internal/DefaultSearchContext.java | 18 +- .../internal/FilteredSearchContext.java | 10 +- .../search/internal/SearchContext.java | 10 +- .../search/lookup/LeafIndexLookup.java | 24 +- .../search/sort/GeoDistanceSortParser.java | 4 +- .../search/sort/ScriptSortParser.java | 4 +- .../search/sort/SortParseElement.java | 4 +- .../CustomPostingsHighlighterTests.java | 36 +- .../XPostingsHighlighterTests.java | 2 - 
.../aliases/IndexAliasesTests.java | 4 +- ...TimeDataHistogramAggregationBenchmark.java | 3 +- .../settings/ClusterSettingsTests.java | 3 +- .../lucene/IndexCacheableQueryTests.java | 140 ++++++ .../common/lucene/ShardCoreKeyMapTests.java | 137 ++++++ .../lucene/index/FreqTermsEnumTests.java | 3 +- .../count/query/CountQueryTests.java | 20 - .../aliases/IndexAliasesServiceTests.java | 12 +- .../cache/bitset/BitSetFilterCacheTest.java | 4 +- .../index/engine/InternalEngineTests.java | 12 +- .../index/engine/ShadowEngineTests.java | 3 +- .../query/SimpleIndexQueryParserTests.java | 83 ++-- .../search/child/AbstractChildTests.java | 4 - .../ChildrenConstantScoreQueryTests.java | 5 +- .../search/child/ChildrenQueryTests.java | 3 +- .../child/ParentConstantScoreQueryTests.java | 4 +- .../index/search/child/ParentQueryTests.java | 4 +- .../AbstractNumberNestedSortingTests.java | 9 +- .../nested/DoubleNestedSortingTests.java | 3 +- .../nested/FloatNestedSortingTests.java | 3 +- .../search/nested/NestedSortingTests.java | 11 +- .../cache/query/IndicesQueryCacheTests.java | 1 + .../indices/stats/IndexStatsTests.java | 126 +++-- .../template/SimpleIndexTemplateTests.java | 2 +- .../warmer/SimpleIndicesWarmerTests.java | 1 + .../bucket/nested/NestedAggregatorTest.java | 3 +- .../child/SimpleChildQuerySearchTests.java | 163 +------ .../innerhits/NestedChildrenFilterTest.java | 6 +- .../functionscore/FunctionScoreTests.java | 4 +- .../search/geo/GeoFilterTests.java | 15 - .../search/query/SearchQueryTests.java | 4 +- .../scriptfilter/ScriptFilterSearchTests.java | 63 +-- .../search/sort/SimpleSortTests.java | 6 +- .../test/ElasticsearchIntegrationTest.java | 2 - .../test/ElasticsearchTestCase.java | 15 - .../test/InternalTestCluster.java | 4 +- .../elasticsearch/test/TestSearchContext.java | 9 - .../test/engine/MockEngineSupport.java | 10 +- 200 files changed, 1734 insertions(+), 2970 deletions(-) create mode 100644 
src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java create mode 100644 src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java create mode 100644 src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java delete mode 100644 src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java delete mode 100644 src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java create mode 100644 src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java create mode 100644 src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java diff --git a/dev-tools/forbidden/core-signatures.txt b/dev-tools/forbidden/core-signatures.txt index 2a662a60974..acd66985081 100644 --- a/dev-tools/forbidden/core-signatures.txt +++ b/dev-tools/forbidden/core-signatures.txt @@ -39,9 +39,6 @@ org.apache.lucene.index.IndexReader#decRef() org.apache.lucene.index.IndexReader#incRef() org.apache.lucene.index.IndexReader#tryIncRef() -@defaultMessage QueryWrapperFilter is cacheable by default - use Queries#wrap instead -org.apache.lucene.search.QueryWrapperFilter#(org.apache.lucene.search.Query) - @defaultMessage Pass the precision step from the mappings explicitly instead org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean) org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean) diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 438b04d4094..a0f7bbaa976 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ 
b/docs/reference/cluster/update-settings.asciidoc @@ -153,9 +153,6 @@ due to forced awareness or allocation filtering. `indices.cache.filter.size`:: See <> -`indices.cache.filter.expire` (time):: - See <> - [float] ==== TTL interval diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 5435f4df2fd..292bb633a29 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -418,6 +418,12 @@ favour or `bool`. The `execution` option of the `terms` filter is now deprecated and ignored if provided. +The `_cache` and `_cache_key` parameters of filters are deprecated in the REST +layer and removed in the Java API. In case they are specified they will be +ignored. Instead filters are always used as their own cache key and elasticsearch +makes decisions by itself about whether it should cache filters based on how +often they are used. + === Snapshot and Restore The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer diff --git a/docs/reference/query-dsl/filters.asciidoc b/docs/reference/query-dsl/filters.asciidoc index 0c78dd21934..59a4a06caec 100644 --- a/docs/reference/query-dsl/filters.asciidoc +++ b/docs/reference/query-dsl/filters.asciidoc @@ -10,85 +10,14 @@ As a general rule, filters should be used instead of queries: [[caching]] === Filters and Caching -Filters can be a great candidate for caching. Caching the result of a -filter does not require a lot of memory, and will cause other queries -executing against the same filter (same parameters) to be blazingly -fast. +Filters can be a great candidate for caching. Caching the document set that +a filter matches does not require much memory and can help improve +execution speed of queries. -However the cost of caching is not the same for all filters. 
For -instance some filters are already fast out of the box while caching could -add significant overhead, and some filters produce results that are already -cacheable so caching them is just a matter of putting the result in the -cache. - -The default caching policy, `_cache: auto`, tracks the 1000 most recently -used filters on a per-index basis and makes decisions based on their -frequency. - -[float] -==== Filters that read directly the index structure - -Some filters can directly read the index structure and potentially jump -over large sequences of documents that are not worth evaluating (for -instance when these documents do not match the query). Caching these -filters introduces overhead given that all documents that the filter -matches need to be consumed in order to be loaded into the cache. - -These filters, which include the <> and -<> filters, are only cached after they -appear 5 times or more in the history of the 1000 most recently used -filters. - -[float] -==== Filters that produce results that are already cacheable - -Some filters produce results that are already cacheable, and the difference -between caching and not caching them is the act of placing the result in -the cache or not. These filters, which include the -<>, -<>, and -<> filters, are by default cached after they -appear twice or more in the history of the most 1000 recently used filters. - -[float] -==== Computational filters - -Some filters need to run some computation in order to figure out whether -a given document matches a filter. These filters, which include the geo and -<> filters, but also the -<> and <> -filters when using the `fielddata` execution mode are never cached by default, -as it would require to evaluate the filter on all documents in your indices -while they can otherwise be only evaluated on documents that match the query. 
- -[float] -==== Compound filters - -The last type of filters are those working with other filters, and includes -the <>, -<>, -<> and -<> filters. - -There is no general rule about these filters. Depending on the filters that -they wrap, they will sometimes return a filter that dynamically evaluates the -sub filters and sometimes evaluate the sub filters eagerly in order to return -a result that is already cacheable, so depending on the case, these filters -will be cached after they appear 2+ or 5+ times in the history of the most -1000 recently used filters. - -[float] -==== Overriding the default behaviour - -All filters allow to set `_cache` element on them to explicitly control -caching. It accepts 3 values: `true` in order to cache the filter, `false` -to make sure that the filter will not be cached, and `auto`, which is the -default and will decide on whether to cache the filter based on the cost -to cache it and how often it has been used as explained above. - -Filters also allow to set `_cache_key` which will be used as the -caching key for that filter. This can be handy when using very large -filters (like a terms filter with many elements in it). +Elasticsearch decides to cache filters based on how often they are used. For +this reason you might occasionally see better performance by splitting +complex filters into a static part that Elasticsearch will cache and a dynamic +part which is less costly than the original filter. include::filters/and-filter.asciidoc[] diff --git a/docs/reference/query-dsl/filters/and-filter.asciidoc b/docs/reference/query-dsl/filters/and-filter.asciidoc index 043a62e68bf..9484d4bf999 100644 --- a/docs/reference/query-dsl/filters/and-filter.asciidoc +++ b/docs/reference/query-dsl/filters/and-filter.asciidoc @@ -32,40 +32,3 @@ filters. Can be placed within queries that accept a filter. 
} -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of -reuse. It is possible to opt-in explicitely for caching by setting `_cache` -to `true`. Since the `_cache` element requires to be set on the `and` filter -itself, the structure then changes a bit to have the filters provided within a -`filters` element: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "and" : { - "filters": [ - { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - { - "prefix" : { "name.second" : "ba" } - } - ], - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc index 7f16ec562d9..748756d7857 100644 --- a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc @@ -230,11 +230,3 @@ are not supported. Here is an example: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same bounding box parameters are used on several (many) other -queries. Note, the process of caching the first execution is higher when -caching (since it needs to satisfy different queries). 
diff --git a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc index 670245a11a3..11ab6ccaa66 100644 --- a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc @@ -172,11 +172,3 @@ The `geo_distance` filter can work with multiple locations / points per document. Once a single location / point matches the filter, the document will be included in the filter. -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same point and distance parameters are used on several (many) other -queries. Note, the process of caching the first execution is higher when -caching (since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc index a4212343eff..22bcb3fce31 100644 --- a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc @@ -116,11 +116,3 @@ The filter *requires* the <> type to be set on the relevant field. -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same points parameters are used on several (many) other queries. -Note, the process of caching the first execution is higher when caching -(since it needs to satisfy different queries). 
diff --git a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc index dfe06932bbd..ca1df1ea995 100644 --- a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc @@ -110,12 +110,3 @@ shape: } -------------------------------------------------- -[float] -==== Caching - -The result of the Filter is not cached by default. Setting `_cache` to -`true` will mean the results of the Filter will be cached. Since shapes -can contain 10s-100s of coordinates and any one differing means a new -shape, it may make sense to only using caching when you are sure that -the shapes will remain reasonably static. - diff --git a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc index cd77803f53f..5f55936c616 100644 --- a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc @@ -61,10 +61,3 @@ next to the given cell. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The -`_cache` parameter can be set to `true` to turn caching on. -By default the filter uses the resulting geohash cells as a cache key. -This can be changed by using the `_cache_key` option. diff --git a/docs/reference/query-dsl/filters/has-child-filter.asciidoc b/docs/reference/query-dsl/filters/has-child-filter.asciidoc index 2605505a792..4802a5c07fa 100644 --- a/docs/reference/query-dsl/filters/has-child-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-child-filter.asciidoc @@ -88,9 +88,3 @@ APIS, eg: curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human" -------------------------------------------------- -[float] -==== Caching - -The `has_child` filter cannot be cached in the filter cache. 
The `_cache` -and `_cache_key` options are a no-op in this filter. Also any filter that -wraps the `has_child` filter either directly or indirectly will not be cached. diff --git a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc index 345e69258bc..dc708cceda3 100644 --- a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc @@ -63,9 +63,3 @@ APIS, eg: curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human" -------------------------------------------------- -[float] -==== Caching - -The `has_parent` filter cannot be cached in the filter cache. The `_cache` -and `_cache_key` options are a no-op in this filter. Also any filter that -wraps the `has_parent` filter either directly or indirectly will not be cached. diff --git a/docs/reference/query-dsl/filters/nested-filter.asciidoc b/docs/reference/query-dsl/filters/nested-filter.asciidoc index 584e26e04f6..41e14cd00c4 100644 --- a/docs/reference/query-dsl/filters/nested-filter.asciidoc +++ b/docs/reference/query-dsl/filters/nested-filter.asciidoc @@ -2,10 +2,7 @@ === Nested Filter A `nested` filter works in a similar fashion to the -<> query, except it's -used as a filter. It follows exactly the same structure, but also allows -to cache the results (set `_cache` to `true`), and have it named (set -the `_name` value). For example: +<> query. For example: [source,js] -------------------------------------------------- @@ -26,8 +23,7 @@ the `_name` value). 
For example: } ] } - }, - "_cache" : true + } } } } diff --git a/docs/reference/query-dsl/filters/not-filter.asciidoc b/docs/reference/query-dsl/filters/not-filter.asciidoc index 1e2b50fac23..ed533fc6d32 100644 --- a/docs/reference/query-dsl/filters/not-filter.asciidoc +++ b/docs/reference/query-dsl/filters/not-filter.asciidoc @@ -50,33 +50,3 @@ Or, in a longer form with a `filter` element: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached if there is evidence of reuse. -The `_cache` can be set to `true` in order to cache it (though usually -not needed). Here is an example: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "not" : { - "filter" : { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/or-filter.asciidoc b/docs/reference/query-dsl/filters/or-filter.asciidoc index c7c845c33ee..890d30f38e0 100644 --- a/docs/reference/query-dsl/filters/or-filter.asciidoc +++ b/docs/reference/query-dsl/filters/or-filter.asciidoc @@ -27,36 +27,3 @@ filters. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence -of reuse. The `_cache` can be -set to `true` in order to cache it (though usually not needed). 
Since -the `_cache` element requires to be set on the `or` filter itself, the -structure then changes a bit to have the filters provided within a -`filters` element: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "or" : { - "filters" : [ - { - "term" : { "name.second" : "banon" } - }, - { - "term" : { "name.nick" : "kimchy" } - } - ], - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/prefix-filter.asciidoc b/docs/reference/query-dsl/filters/prefix-filter.asciidoc index 73c13ec8fe1..964d9f42ba2 100644 --- a/docs/reference/query-dsl/filters/prefix-filter.asciidoc +++ b/docs/reference/query-dsl/filters/prefix-filter.asciidoc @@ -16,22 +16,3 @@ a filter. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is cached by default if there is evidence of reuse. -The `_cache` can be set to `true` in order to cache it. Here is an example: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "prefix" : { - "user" : "ki", - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/query-filter.asciidoc b/docs/reference/query-dsl/filters/query-filter.asciidoc index 2c5a7556c9a..8df0f3c3b11 100644 --- a/docs/reference/query-dsl/filters/query-filter.asciidoc +++ b/docs/reference/query-dsl/filters/query-filter.asciidoc @@ -19,34 +19,3 @@ that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. - -The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same query is used on several (many) other queries. 
Note, the -process of caching the first execution is higher when not caching (since -it needs to satisfy different queries). - -Setting the `_cache` element requires a different format for the -`query`: - -[source,js] --------------------------------------------------- -{ - "constantScore" : { - "filter" : { - "fquery" : { - "query" : { - "query_string" : { - "query" : "this AND that OR thus" - } - }, - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/range-filter.asciidoc b/docs/reference/query-dsl/filters/range-filter.asciidoc index 51d7390f1b1..0c84f91e196 100644 --- a/docs/reference/query-dsl/filters/range-filter.asciidoc +++ b/docs/reference/query-dsl/filters/range-filter.asciidoc @@ -95,11 +95,3 @@ requires more memory, so make sure you have sufficient memory on your nodes in order to use this execution mode. It usually makes sense to use it on fields you're already aggregating or sorting by. -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. The -`_cache` can be set to `false` to turn it off. - -Having the `now` expression used without rounding will make the filter unlikely to be -cached since reuse is very unlikely. 
diff --git a/docs/reference/query-dsl/filters/regexp-filter.asciidoc b/docs/reference/query-dsl/filters/regexp-filter.asciidoc index 1f11da47565..06a45ae0739 100644 --- a/docs/reference/query-dsl/filters/regexp-filter.asciidoc +++ b/docs/reference/query-dsl/filters/regexp-filter.asciidoc @@ -51,9 +51,7 @@ You have to enable caching explicitly in order to have the "flags" : "INTERSECTION|COMPLEMENT|EMPTY", "max_determinized_states": 20000 }, - "_name":"test", - "_cache" : true, - "_cache_key" : "key" + "_name":"test" } } } diff --git a/docs/reference/query-dsl/filters/script-filter.asciidoc b/docs/reference/query-dsl/filters/script-filter.asciidoc index f9e0cd19cee..2f49422d88a 100644 --- a/docs/reference/query-dsl/filters/script-filter.asciidoc +++ b/docs/reference/query-dsl/filters/script-filter.asciidoc @@ -43,11 +43,3 @@ to use the ability to pass parameters to the script itself, for example: } ---------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same script and parameters are used on several (many) other queries. -Note, the process of caching the first execution is higher when caching -(since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/term-filter.asciidoc b/docs/reference/query-dsl/filters/term-filter.asciidoc index cb249a83604..768fd94ac89 100644 --- a/docs/reference/query-dsl/filters/term-filter.asciidoc +++ b/docs/reference/query-dsl/filters/term-filter.asciidoc @@ -17,22 +17,3 @@ accept a filter, for example: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. -The `_cache` can be set to `false` to turn it off. 
Here is an example: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "term" : { - "user" : "kimchy", - "_cache" : false - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/terms-filter.asciidoc b/docs/reference/query-dsl/filters/terms-filter.asciidoc index 19e9358a4dd..027fc174db2 100644 --- a/docs/reference/query-dsl/filters/terms-filter.asciidoc +++ b/docs/reference/query-dsl/filters/terms-filter.asciidoc @@ -18,13 +18,6 @@ Filters documents that have fields that match any of the provided terms The `terms` filter is also aliased with `in` as the filter name for simpler usage. -[float] -==== Caching - -The result of the filter is cached if there is evidence of reuse. It is -possible to enable caching explicitely by setting `_cache` to `true` and -to disable caching by setting `_cache` to `false`. - [float] ==== Terms lookup mechanism @@ -93,8 +86,7 @@ curl -XGET localhost:9200/tweets/_search -d '{ "type" : "user", "id" : "2", "path" : "followers" - }, - "_cache_key" : "user_2_friends" + } } } } @@ -102,10 +94,6 @@ curl -XGET localhost:9200/tweets/_search -d '{ }' -------------------------------------------------- -If there are lots of matching values, then `_cache_key` is recommended to be set, -so that the filter cache will not store a reference to the potentially heavy -terms filter. 
- The structure of the external terms document can also include array of inner objects, for example: diff --git a/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/api/indices.clear_cache.json index 9fd73acbd01..c8e3e84de88 100644 --- a/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/api/indices.clear_cache.json @@ -32,10 +32,6 @@ "type" : "boolean", "description" : "Clear filter caches" }, - "filter_keys": { - "type" : "boolean", - "description" : "A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all)" - }, "id": { "type" : "boolean", "description" : "Clear ID caches for parent/child" diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java index 7528206f6ae..936fe490a5d 100644 --- a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java +++ b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java @@ -18,9 +18,9 @@ package org.apache.lucene.search.postingshighlight; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; @@ -91,8 +91,7 @@ public final class CustomPostingsHighlighter extends XPostingsHighlighter { /* Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object */ - public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException { - IndexReader reader = searcher.getIndexReader(); + public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexReader reader, int docId, int maxPassages) throws 
IOException { IndexReaderContext readerContext = reader.getContext(); List leaves = readerContext.leaves(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index 2f0b0e7d4f2..d2395abf5f8 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.stats; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index 59e4f3a8842..3a96c83b3ac 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -37,7 +37,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest 0) { - clearedAtLeastOne = true; - service.cache().filter().clear("api", request.filterKeys()); - } if (request.fieldDataCache()) { clearedAtLeastOne = true; if (request.fields() == null || request.fields().length == 0) { diff --git a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java index 0bcada3c827..312c5979994 100644 --- a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java +++ 
b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.ttl.IndicesTTLService; @@ -62,9 +61,6 @@ public class ClusterDynamicSettingsModule extends AbstractModule { clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*"); clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*"); clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*"); - clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_SIZE); - clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_EXPIRE, Validator.TIME); - clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, Validator.POSITIVE_INTEGER); clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_TYPE); clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); clusterDynamicSettings.addDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME); diff --git a/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java b/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java new file mode 100644 index 00000000000..d31cd3835ec --- /dev/null +++ b/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Objects; + +/** + * Base implementation for a query which is cacheable at the index level but + * not the segment level as usually expected. 
+ */ +public abstract class IndexCacheableQuery extends Query { + + private Object readerCacheKey; + + @Override + public Query rewrite(IndexReader reader) throws IOException { + if (reader.getCoreCacheKey() != this.readerCacheKey) { + IndexCacheableQuery rewritten = (IndexCacheableQuery) clone(); + rewritten.readerCacheKey = reader.getCoreCacheKey(); + return rewritten; + } + return super.rewrite(reader); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) + && readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hashCode(readerCacheKey); + } + + @Override + public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + if (readerCacheKey == null) { + throw new IllegalStateException("Rewrite first"); + } + if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) { + throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting"); + } + return doCreateWeight(searcher, needsScores); + } + + /** Create a {@link Weight} for this query. + * @see Query#createWeight(IndexSearcher, boolean) + */ + public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java b/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java new file mode 100644 index 00000000000..0d9270edaff --- /dev/null +++ b/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Multimap; + +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReader.CoreClosedListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardUtils; + +import java.io.IOException; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +/** + * A map between segment core cache keys and the shard that these segments + * belong to. This allows to get the shard that a segment belongs to or to get + * the entire set of live core cache keys for a given index. In order to work + * this class needs to be notified about new segments. It modifies the current + * mappings as segments that were not known before are added and prevents the + * structure from growing indefinitely by registering close listeners on these + * segments so that at any time it only tracks live segments. + * + * NOTE: This is heavy. Avoid using this class unless absolutely required. 
+ */ +public final class ShardCoreKeyMap { + + private final Map coreKeyToShard; + private final Multimap indexToCoreKey; + + public ShardCoreKeyMap() { + coreKeyToShard = new IdentityHashMap<>(); + indexToCoreKey = HashMultimap.create(); + } + + /** + * Register a {@link LeafReader}. This is necessary so that the core cache + * key of this reader can be found later using {@link #getCoreCacheKeys(ShardId)}. + */ + public void add(LeafReader reader) { + final ShardId shardId = ShardUtils.extractShardId(reader); + if (shardId == null) { + throw new IllegalArgumentException("Could not extract shard id from " + reader); + } + final Object coreKey = reader.getCoreCacheKey(); + final String index = shardId.getIndex(); + synchronized (this) { + if (coreKeyToShard.put(coreKey, shardId) == null) { + final boolean added = indexToCoreKey.put(index, coreKey); + assert added; + reader.addCoreClosedListener(new CoreClosedListener() { + @Override + public void onClose(Object ownerCoreCacheKey) throws IOException { + assert coreKey == ownerCoreCacheKey; + synchronized (ShardCoreKeyMap.this) { + coreKeyToShard.remove(ownerCoreCacheKey); + indexToCoreKey.remove(index, coreKey); + } + } + }); + } + } + } + + /** + * Return the {@link ShardId} that holds the given segment, or {@code null} + * if this segment is not tracked. + */ + public synchronized ShardId getShardId(Object coreKey) { + return coreKeyToShard.get(coreKey); + } + + /** + * Get the set of core cache keys associated with the given index. + */ + public synchronized Set getCoreKeysForIndex(String index) { + return ImmutableSet.copyOf(indexToCoreKey.get(index)); + } + + /** + * Return the number of tracked segments. 
+ */ + public synchronized int size() { + assert indexToCoreKey.size() == coreKeyToShard.size(); + return coreKeyToShard.size(); + } + +} diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java index e05c11905ec..71cc5d7f9c2 100644 --- a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java +++ b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java @@ -22,6 +22,8 @@ package org.elasticsearch.common.lucene.docset; import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; @@ -104,32 +106,41 @@ public class DocIdSets { } /** - * Given a {@link DocIdSet}, return a {@link Bits} instance that will match + * Given a {@link Scorer}, return a {@link Bits} instance that will match * all documents contained in the set. Note that the returned {@link Bits} - * instance should only be consumed once and in order. + * instance MUST be consumed in order. 
*/ - public static Bits asSequentialAccessBits(final int maxDoc, @Nullable DocIdSet set) throws IOException { - if (set == null) { + public static Bits asSequentialAccessBits(final int maxDoc, @Nullable Scorer scorer) throws IOException { + if (scorer == null) { return new Bits.MatchNoBits(maxDoc); } - Bits bits = set.bits(); - if (bits != null) { - return bits; - } - final DocIdSetIterator iterator = set.iterator(); - if (iterator == null) { - return new Bits.MatchNoBits(maxDoc); + final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator(); + final DocIdSetIterator iterator; + if (twoPhase == null) { + iterator = scorer; + } else { + iterator = twoPhase.approximation(); } + return new Bits() { - int previous = 0; + int previous = -1; + boolean previousMatched = false; @Override public boolean get(int index) { + if (index < 0 || index >= maxDoc) { + throw new IndexOutOfBoundsException(index + " is out of bounds: [" + 0 + "-" + maxDoc + "["); + } if (index < previous) { throw new IllegalArgumentException("This Bits instance can only be consumed in order. 
" + "Got called on [" + index + "] while previously called on [" + previous + "]"); } + if (index == previous) { + // we cache whether it matched because it is illegal to call + // twoPhase.matches() twice + return previousMatched; + } previous = index; int doc = iterator.docID(); @@ -140,7 +151,14 @@ public class DocIdSets { throw new IllegalStateException("Cannot advance iterator", e); } } - return index == doc; + if (index == doc) { + try { + return previousMatched = twoPhase == null || twoPhase.matches(); + } catch (IOException e) { + throw new IllegalStateException("Cannot validate match", e); + } + } + return previousMatched = false; } @Override diff --git a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java deleted file mode 100644 index 027f794e6f1..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.search.Filter; - -/** - * A marker indicating that this is a cached filter. 
- */ -public abstract class CachedFilter extends Filter { - - public static boolean isCached(Filter filter) { - return filter instanceof CachedFilter; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index 7501307264b..770ddac0ce3 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -31,18 +31,18 @@ import java.io.IOException; public class FilteredCollector implements Collector { private final Collector collector; - private final Filter filter; + private final Weight filter; - public FilteredCollector(Collector collector, Filter filter) { + public FilteredCollector(Collector collector, Weight filter) { this.collector = collector; this.filter = filter; } @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { - final DocIdSet set = filter.getDocIdSet(context, null); + final Scorer filterScorer = filter.scorer(context, null); final LeafCollector in = collector.getLeafCollector(context); - final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), set); + final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); return new FilterLeafCollector(in) { @Override diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java deleted file mode 100644 index 73b3ba0590c..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.util.Bits; - -import java.io.IOException; - -/** - * A marker interface for {@link org.apache.lucene.search.Filter} denoting the filter - * as one that should not be cached, ever. - */ -public abstract class NoCacheFilter extends Filter { - - private static final class NoCacheFilterWrapper extends NoCacheFilter { - private final Filter delegate; - private NoCacheFilterWrapper(Filter delegate) { - this.delegate = delegate; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - return delegate.getDocIdSet(context, acceptDocs); - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj instanceof NoCacheFilterWrapper) { - return delegate.equals(((NoCacheFilterWrapper)obj).delegate); - } - return false; - } - - @Override - public String toString(String field) { - - return "no_cache(" + delegate + ")"; - } - - } - - /** - * Wraps a filter in a NoCacheFilter or returns it if it already is a NoCacheFilter. 
- */ - public static Filter wrap(Filter filter) { - if (filter instanceof NoCacheFilter) { - return filter; - } - return new NoCacheFilterWrapper(filter); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java deleted file mode 100644 index c5bec8c5d9b..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.search.Query; - -/** - * Queries are never cached directly, but a query can be wrapped in a filter that may end being cached. - * Filters that wrap this query either directly or indirectly will never be cached. 
- */ -public abstract class NoCacheQuery extends Query { - - @Override - public final String toString(String s) { - return "no_cache(" + innerToString(s) + ")"; - } - - public abstract String innerToString(String s); -} diff --git a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 69ed1f68d64..fe33206b0cc 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -31,10 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import java.util.List; import java.util.regex.Pattern; @@ -54,19 +51,19 @@ public class Queries { } public static Filter newMatchAllFilter() { - return wrap(newMatchAllQuery()); + return new QueryWrapperFilter(newMatchAllQuery()); } public static Filter newMatchNoDocsFilter() { - return wrap(newMatchNoDocsQuery()); + return new QueryWrapperFilter(newMatchNoDocsQuery()); } public static Filter newNestedFilter() { - return wrap(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); + return new QueryWrapperFilter(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); } public static Filter newNonNestedFilter() { - return wrap(not(newNestedFilter())); + return new QueryWrapperFilter(not(newNestedFilter())); } /** Return a query that matches all documents but those that match the given query. */ @@ -169,24 +166,4 @@ public class Queries { optionalClauseCount : (result < 0 ? 0 : result)); } - - /** - * Wraps a query in a filter. 
- * - * If a filter has an anti per segment execution / caching nature then @{@link CustomQueryWrappingFilter} is returned - * otherwise the standard {@link org.apache.lucene.search.QueryWrapperFilter} is returned. - */ - @SuppressForbidden(reason = "QueryWrapperFilter cachability") - public static Filter wrap(Query query, QueryParseContext context) { - if ((context != null && context.requireCustomQueryWrappingFilter()) || CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) { - return new CustomQueryWrappingFilter(query); - } else { - return new QueryWrapperFilter(query); - } - } - - /** Wrap as a {@link Filter}. */ - public static Filter wrap(Query query) { - return wrap(query, null); - } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java index 8ecb6228705..a4c92d78804 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java @@ -19,9 +19,11 @@ package org.elasticsearch.common.lucene.search; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; import org.apache.lucene.util.Bits; import java.io.IOException; @@ -46,4 +48,13 @@ public abstract class ResolvableFilter extends Filter { return null; } } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + final Filter resolved = resolve(); + if (resolved != null) { + return resolved; + } + return super.rewrite(reader); + } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index 03dc0fcfb9e..d1835f57098 100644 --- 
a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -119,16 +119,22 @@ public class FiltersFunctionScoreQuery extends Query { // TODO: needsScores // if we dont need scores, just return the underlying Weight? Weight subQueryWeight = subQuery.createWeight(searcher, needsScores); - return new CustomBoostFactorWeight(this, subQueryWeight); + Weight[] filterWeights = new Weight[filterFunctions.length]; + for (int i = 0; i < filterFunctions.length; ++i) { + filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false); + } + return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights); } class CustomBoostFactorWeight extends Weight { final Weight subQueryWeight; + final Weight[] filterWeights; - public CustomBoostFactorWeight(Query parent, Weight subQueryWeight) throws IOException { + public CustomBoostFactorWeight(Query parent, Weight subQueryWeight, Weight[] filterWeights) throws IOException { super(parent); this.subQueryWeight = subQueryWeight; + this.filterWeights = filterWeights; } @Override @@ -162,7 +168,8 @@ public class FiltersFunctionScoreQuery extends Query { for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; functions[i] = filterFunction.function.getLeafScoreFunction(context); - docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterFunction.filter.getDocIdSet(context, acceptDocs)); + Scorer filterScorer = filterWeights[i].scorer(context, null); // no need to apply accepted docs + docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); } return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore); } @@ -177,7 +184,8 @@ public class FiltersFunctionScoreQuery extends Query { // First: Gather 
explanations for all filters List filterExplanations = new ArrayList<>(); float weightSum = 0; - for (FilterFunction filterFunction : filterFunctions) { + for (int i = 0; i < filterFunctions.length; ++i) { + FilterFunction filterFunction = filterFunctions[i]; if (filterFunction.function instanceof WeightFactorFunction) { weightSum += ((WeightFactorFunction) filterFunction.function).getWeight(); @@ -186,7 +194,7 @@ public class FiltersFunctionScoreQuery extends Query { } Bits docSet = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), - filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs())); + filterWeights[i].scorer(context, null)); if (docSet.get(doc)) { Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); diff --git a/src/main/java/org/elasticsearch/index/IndexService.java b/src/main/java/org/elasticsearch/index/IndexService.java index 74c0e87f44c..6b192981dca 100644 --- a/src/main/java/org/elasticsearch/index/IndexService.java +++ b/src/main/java/org/elasticsearch/index/IndexService.java @@ -22,6 +22,7 @@ package org.elasticsearch.index; import com.google.common.base.Function; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; + import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -148,7 +149,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class); // inject workarounds for cyclic dep - indexCache.filter().setIndexService(this); indexFieldData.setIndexService(this); bitSetFilterCache.setIndexService(this); this.nodeEnv = nodeEnv; diff --git a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java 
b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java index 7d142a0c803..940344a627c 100644 --- a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java +++ b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java @@ -22,10 +22,10 @@ package org.elasticsearch.index.aliases; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentFactory; @@ -109,7 +109,7 @@ public class IndexAliasesService extends AbstractIndexComponent implements Itera return null; } } - return Queries.wrap(combined); + return new QueryWrapperFilter(combined); } } diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 338b49f0490..3b71f735c2e 100644 --- a/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.cache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -37,12 +38,14 @@ import java.io.IOException; public class IndexCache extends AbstractIndexComponent implements Closeable { private final FilterCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; private final BitsetFilterCache bitsetFilterCache; @Inject - public IndexCache(Index index, @IndexSettings Settings indexSettings, 
FilterCache filterCache, BitsetFilterCache bitsetFilterCache) { + public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryCachingPolicy filterCachingPolicy, BitsetFilterCache bitsetFilterCache) { super(index, indexSettings); this.filterCache = filterCache; + this.filterCachingPolicy = filterCachingPolicy; this.bitsetFilterCache = bitsetFilterCache; } @@ -50,6 +53,10 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { return filterCache; } + public QueryCachingPolicy filterPolicy() { + return filterCachingPolicy; + } + /** * Return the {@link BitsetFilterCache} for this index. */ diff --git a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 13fc57ed1ef..284ecc0ecb7 100644 --- a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -36,7 +36,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.NoCacheFilter; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -105,7 +104,6 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea public BitDocIdSetFilter getBitDocIdSetFilter(Filter filter) { assert filter != null; - assert !(filter instanceof NoCacheFilter); return new BitDocIdSetFilterWrapper(filter); } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java index a16b5da2bd9..37c45e3adf7 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java +++ 
b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java @@ -19,19 +19,14 @@ package org.elasticsearch.index.cache.filter; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.index.IndexComponent; -import org.elasticsearch.index.IndexService; import java.io.Closeable; /** * */ -public interface FilterCache extends IndexComponent, Closeable { +public interface FilterCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache { static class EntriesStats { public final long sizeInBytes; @@ -43,16 +38,5 @@ public interface FilterCache extends IndexComponent, Closeable { } } - // we need to "inject" the index service to not create cyclic dep - void setIndexService(IndexService indexService); - - String type(); - - Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy); - - void clear(Object reader); - void clear(String reason); - - void clear(String reason, String[] keys); } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java index 551ea4fa279..20496e3266b 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; /** * @@ -46,7 +46,7 @@ public class FilterCacheModule extends AbstractModule { @Override protected void configure() { 
bind(FilterCache.class) - .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache")) + .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache")) .in(Scopes.SINGLETON); // the filter cache is a node-level thing, however we want the most popular filters // to be computed on a per-index basis, that is why we don't use the SINGLETON diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java index e56a1145d08..948f7e57702 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java @@ -33,32 +33,79 @@ import java.io.IOException; */ public class FilterCacheStats implements Streamable, ToXContent { - long memorySize; - long evictions; + long ramBytesUsed; + long hitCount; + long missCount; + long cacheCount; + long cacheSize; public FilterCacheStats() { } - public FilterCacheStats(long memorySize, long evictions) { - this.memorySize = memorySize; - this.evictions = evictions; + public FilterCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) { + this.ramBytesUsed = ramBytesUsed; + this.hitCount = hitCount; + this.missCount = missCount; + this.cacheCount = cacheCount; + this.cacheSize = cacheSize; } public void add(FilterCacheStats stats) { - this.memorySize += stats.memorySize; - this.evictions += stats.evictions; + ramBytesUsed += stats.ramBytesUsed; + hitCount += stats.hitCount; + missCount += stats.missCount; + cacheCount += stats.cacheCount; + cacheSize += stats.cacheSize; } public long getMemorySizeInBytes() { - return this.memorySize; + return ramBytesUsed; } public ByteSizeValue getMemorySize() { - return new ByteSizeValue(memorySize); + return new 
ByteSizeValue(ramBytesUsed); } + /** + * The total number of lookups in the cache. + */ + public long getTotalCount() { + return hitCount + missCount; + } + + /** + * The number of successful lookups in the cache. + */ + public long getHitCount() { + return hitCount; + } + + /** + * The number of lookups in the cache that failed to retrieve a {@link DocIdSet}. + */ + public long getMissCount() { + return missCount; + } + + /** + * The number of {@link DocIdSet}s that have been cached. + */ + public long getCacheCount() { + return cacheCount; + } + + /** + * The number of {@link DocIdSet}s that are in the cache. + */ + public long getCacheSize() { + return cacheSize; + } + + /** + * The number of {@link DocIdSet}s that have been evicted from the cache. + */ public long getEvictions() { - return this.evictions; + return cacheCount - cacheSize; } public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException { @@ -67,22 +114,34 @@ public class FilterCacheStats implements Streamable, ToXContent { return stats; } + @Override public void readFrom(StreamInput in) throws IOException { - memorySize = in.readVLong(); - evictions = in.readVLong(); + ramBytesUsed = in.readLong(); + hitCount = in.readLong(); + missCount = in.readLong(); + cacheCount = in.readLong(); + cacheSize = in.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(memorySize); - out.writeVLong(evictions); + out.writeLong(ramBytesUsed); + out.writeLong(hitCount); + out.writeLong(missCount); + out.writeLong(cacheCount); + out.writeLong(cacheSize); } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(Fields.FILTER_CACHE); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize); + builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed); + builder.field(Fields.TOTAL_COUNT, getTotalCount()); + 
builder.field(Fields.HIT_COUNT, getHitCount()); + builder.field(Fields.MISS_COUNT, getMissCount()); + builder.field(Fields.CACHE_SIZE, getCacheSize()); + builder.field(Fields.CACHE_COUNT, getCacheCount()); builder.field(Fields.EVICTIONS, getEvictions()); builder.endObject(); return builder; @@ -92,6 +151,12 @@ public class FilterCacheStats implements Streamable, ToXContent { static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache"); static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); + static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count"); + static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count"); + static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count"); + static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size"); + static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count"); static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions"); } + } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java index 67ab084bd07..97f75094580 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java @@ -19,45 +19,35 @@ package org.elasticsearch.index.cache.filter; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.lucene.search.DocIdSet; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.docset.DocIdSets; -import org.elasticsearch.common.metrics.CounterMetric; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.cache.filter.IndicesFilterCache; + +import java.io.Closeable; +import java.io.IOException; /** */ -public class ShardFilterCache extends AbstractIndexShardComponent implements RemovalListener { +public class ShardFilterCache extends AbstractIndexShardComponent implements Closeable { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric totalMetric = new CounterMetric(); + final IndicesFilterCache cache; @Inject - public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings) { + public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings, IndicesFilterCache cache) { super(shardId, indexSettings); + this.cache = cache; } public FilterCacheStats stats() { - return new FilterCacheStats(totalMetric.count(), evictionsMetric.count()); - } - - public void onCached(long sizeInBytes) { - totalMetric.inc(sizeInBytes); + return cache.getStats(shardId); } @Override - public void onRemoval(RemovalNotification removalNotification) { - if (removalNotification.wasEvicted()) { - evictionsMetric.inc(); - } - if (removalNotification.getValue() != null) { - totalMetric.dec(DocIdSets.sizeInBytes(removalNotification.getValue())); - } + public void close() throws IOException { + cache.onClose(shardId); } + } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java new file mode 100644 index 00000000000..5dfaf4c7799 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.cache.filter.index; + +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Weight; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.indices.cache.filter.IndicesFilterCache; + +/** + * The index-level filter cache. This class mostly delegates to the node-level + * filter cache: {@link IndicesFilterCache}. 
+ */ +public class IndexFilterCache extends AbstractIndexComponent implements FilterCache { + + final IndicesFilterCache indicesFilterCache; + + @Inject + public IndexFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) { + super(index, indexSettings); + this.indicesFilterCache = indicesFilterCache; + } + + @Override + public void close() throws ElasticsearchException { + clear("close"); + } + + @Override + public void clear(String reason) { + logger.debug("full cache clear, reason [{}]", reason); + indicesFilterCache.clearIndex(index.getName()); + } + + @Override + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + return indicesFilterCache.doCache(weight, policy); + } + +} diff --git a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java index 41a704a9afd..ded3c207a42 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java @@ -19,15 +19,12 @@ package org.elasticsearch.index.cache.filter.none; -import org.apache.lucene.search.Filter; import org.apache.lucene.search.QueryCachingPolicy; -import org.elasticsearch.common.Nullable; +import org.apache.lucene.search.Weight; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.filter.FilterCache; import org.elasticsearch.index.settings.IndexSettings; @@ -42,38 +39,18 @@ public class NoneFilterCache extends AbstractIndexComponent implements FilterCac logger.debug("Using no filter cache"); } - @Override - public void setIndexService(IndexService indexService) { - // 
nothing to do here... - } - - @Override - public String type() { - return "none"; - } - @Override public void close() { // nothing to do here } @Override - public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy) { - return filterToCache; + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + return weight; } @Override public void clear(String reason) { // nothing to do here } - - @Override - public void clear(String reason, String[] keys) { - // nothing to do there - } - - @Override - public void clear(Object reader) { - // nothing to do here - } } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java deleted file mode 100644 index 9e7a3772860..00000000000 --- a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.cache.filter.weighted; - -import com.google.common.cache.Cache; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.Weigher; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.search.BitsFilteredDocIdSet; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.docset.DocIdSets; -import org.elasticsearch.common.lucene.search.CachedFilter; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.common.lucene.search.ResolvableFilter; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.cache.filter.FilterCache; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; - -import java.io.IOException; -import java.util.concurrent.ConcurrentMap; - -public class WeightedFilterCache extends AbstractIndexComponent implements FilterCache, SegmentReader.CoreClosedListener, IndexReader.ReaderClosedListener { - - final IndicesFilterCache indicesFilterCache; - IndexService indexService; - - final ConcurrentMap seenReaders = ConcurrentCollections.newConcurrentMap(); - 
- @Inject - public WeightedFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) { - super(index, indexSettings); - this.indicesFilterCache = indicesFilterCache; - } - - @Override - public void setIndexService(IndexService indexService) { - this.indexService = indexService; - } - - @Override - public String type() { - return "weighted"; - } - - @Override - public void close() { - clear("close"); - } - - @Override - public void onClose(IndexReader reader) { - clear(reader.getCoreCacheKey()); - } - - - @Override - public void clear(String reason) { - logger.debug("full cache clear, reason [{}]", reason); - for (Object readerKey : seenReaders.keySet()) { - Boolean removed = seenReaders.remove(readerKey); - if (removed == null) { - return; - } - indicesFilterCache.addReaderKeyToClean(readerKey); - } - } - - @Override - public void clear(String reason, String[] keys) { - logger.debug("clear keys [], reason [{}]", reason, keys); - for (String key : keys) { - final HashedBytesRef keyBytes = new HashedBytesRef(key); - for (Object readerKey : seenReaders.keySet()) { - indicesFilterCache.cache().invalidate(new FilterCacheKey(readerKey, keyBytes)); - } - } - } - - @Override - public void onClose(Object coreKey) { - clear(coreKey); - } - - @Override - public void clear(Object coreCacheKey) { - // we add the seen reader before we add the first cache entry for this reader - // so, if we don't see it here, its won't be in the cache - Boolean removed = seenReaders.remove(coreCacheKey); - if (removed == null) { - return; - } - indicesFilterCache.addReaderKeyToClean(coreCacheKey); - } - - @Override - public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy cachePolicy) { - if (filterToCache == null) { - return null; - } - if (filterToCache instanceof NoCacheFilter) { - return filterToCache; - } - if (CachedFilter.isCached(filterToCache)) { - return filterToCache; - } - if (filterToCache 
instanceof ResolvableFilter) { - throw new IllegalArgumentException("Cannot cache instances of ResolvableFilter: " + filterToCache); - } - return new FilterCacheFilterWrapper(filterToCache, cacheKey, cachePolicy, this); - } - - static class FilterCacheFilterWrapper extends CachedFilter { - - private final Filter filter; - private final Object filterCacheKey; - private final QueryCachingPolicy cachePolicy; - private final WeightedFilterCache cache; - - FilterCacheFilterWrapper(Filter filter, Object cacheKey, QueryCachingPolicy cachePolicy, WeightedFilterCache cache) { - this.filter = filter; - this.filterCacheKey = cacheKey != null ? cacheKey : filter; - this.cachePolicy = cachePolicy; - this.cache = cache; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - if (context.ord == 0) { - cachePolicy.onUse(filter); - } - FilterCacheKey cacheKey = new FilterCacheKey(context.reader().getCoreCacheKey(), filterCacheKey); - Cache innerCache = cache.indicesFilterCache.cache(); - - DocIdSet cacheValue = innerCache.getIfPresent(cacheKey); - final DocIdSet ret; - if (cacheValue != null) { - ret = cacheValue; - } else { - final DocIdSet uncached = filter.getDocIdSet(context, null); - if (cachePolicy.shouldCache(filter, context)) { - if (!cache.seenReaders.containsKey(context.reader().getCoreCacheKey())) { - Boolean previous = cache.seenReaders.putIfAbsent(context.reader().getCoreCacheKey(), Boolean.TRUE); - if (previous == null) { - // we add a core closed listener only, for non core IndexReaders we rely on clear being called (percolator for example) - context.reader().addCoreClosedListener(cache); - } - } - // we can't pass down acceptedDocs provided, because we are caching the result, and acceptedDocs - // might be specific to a query. 
We don't pass the live docs either because a cache built for a specific - // generation of a segment might be reused by an older generation which has fewer deleted documents - cacheValue = DocIdSets.toCacheable(context.reader(), uncached); - // we might put the same one concurrently, that's fine, it will be replaced and the removal - // will be called - ShardId shardId = ShardUtils.extractShardId(context.reader()); - if (shardId != null) { - IndexShard shard = cache.indexService.shard(shardId.id()); - if (shard != null) { - cacheKey.removalListener = shard.filterCache(); - shard.filterCache().onCached(DocIdSets.sizeInBytes(cacheValue)); - } - } - innerCache.put(cacheKey, cacheValue); - ret = cacheValue; - } else { - // uncached - ret = uncached; - } - } - - return BitsFilteredDocIdSet.wrap(DocIdSets.isEmpty(ret) ? null : ret, acceptDocs); - } - - @Override - public String toString(String field) { - return "cache(" + filter + ")"; - } - - @Override - public boolean equals(Object o) { - if (super.equals(o) == false) return false; - return this.filter.equals(((FilterCacheFilterWrapper) o).filter); - } - - @Override - public int hashCode() { - return 31 * super.hashCode() + filter.hashCode(); - } - } - - - /** A weigher for the Guava filter cache that uses a minimum entry size */ - public static class FilterCacheValueWeigher implements Weigher { - - private final int minimumEntrySize; - - public FilterCacheValueWeigher(int minimumEntrySize) { - this.minimumEntrySize = minimumEntrySize; - } - - @Override - public int weigh(FilterCacheKey key, DocIdSet value) { - int weight = (int) Math.min(DocIdSets.sizeInBytes(value), Integer.MAX_VALUE); - return Math.max(weight, this.minimumEntrySize); - } - } - - public static class FilterCacheKey { - private final Object readerKey; - private final Object filterKey; - - // if we know, we will try and set the removal listener (for statistics) - // its ok that its not volatile because we make sure we only set it when the object is 
created before its shared between threads - @Nullable - public RemovalListener removalListener; - - public FilterCacheKey(Object readerKey, Object filterKey) { - this.readerKey = readerKey; - this.filterKey = filterKey; - } - - public Object readerKey() { - return readerKey; - } - - public Object filterKey() { - return filterKey; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; -// if (o == null || getClass() != o.getClass()) return false; - FilterCacheKey that = (FilterCacheKey) o; - return (readerKey().equals(that.readerKey()) && filterKey.equals(that.filterKey)); - } - - @Override - public int hashCode() { - return readerKey().hashCode() + 31 * filterKey.hashCode(); - } - } -} diff --git a/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java b/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java index e7246d0e942..808542fadc4 100644 --- a/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java +++ b/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java @@ -21,14 +21,10 @@ package org.elasticsearch.index.cache.query; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; -import org.apache.lucene.search.DocIdSet; -import org.elasticsearch.common.bytes.BytesReference; + import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.FilterCacheStats; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; diff --git a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java 
index ac072115cc7..9c069139173 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -21,6 +21,8 @@ package org.elasticsearch.index.engine; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; @@ -74,6 +76,8 @@ public final class EngineConfig { private final Similarity similarity; private final CodecService codecService; private final Engine.FailedEngineListener failedEngineListener; + private final QueryCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; /** * Index setting for index concurrency / number of threadstates in the indexwriter. @@ -130,7 +134,11 @@ public final class EngineConfig { /** * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ - public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, TranslogRecoveryPerformer translogRecoveryPerformer) { + public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, + IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, + Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, + Similarity similarity, CodecService 
codecService, Engine.FailedEngineListener failedEngineListener, + TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy) { this.shardId = shardId; this.threadPool = threadPool; this.indexingService = indexingService; @@ -155,6 +163,8 @@ public final class EngineConfig { versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE); updateVersionMapSize(); this.translogRecoveryPerformer = translogRecoveryPerformer; + this.filterCache = filterCache; + this.filterCachingPolicy = filterCachingPolicy; } /** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */ @@ -396,4 +406,18 @@ public final class EngineConfig { public TranslogRecoveryPerformer getTranslogRecoveryPerformer() { return translogRecoveryPerformer; } + + /** + * Return the cache to use for filters. + */ + public QueryCache getFilterCache() { + return filterCache; + } + + /** + * Return the policy to use when caching filters. 
+ */ + public QueryCachingPolicy getFilterCachingPolicy() { + return filterCachingPolicy; + } } diff --git a/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java b/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java index 7255b686bc8..fa8d9a6a5c1 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java @@ -40,7 +40,9 @@ public class EngineSearcherFactory extends SearcherFactory { @Override public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = super.newSearcher(reader, previousReader); + searcher.setQueryCache(engineConfig.getFilterCache()); + searcher.setQueryCachingPolicy(engineConfig.getFilterCachingPolicy()); searcher.setSimilarity(engineConfig.getSimilarity()); return searcher; } diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 7ea6db76926..1d0bbe21644 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1044,7 +1044,7 @@ public class InternalEngine extends Engine { try { assert isMergedSegment(reader); if (warmer != null) { - final Engine.Searcher searcher = new Searcher("warmer", new IndexSearcher(reader)); + final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(reader, null)); final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher); warmer.warmNewReaders(context); } @@ -1077,8 +1077,7 @@ public class InternalEngine extends Engine { @Override public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { - IndexSearcher searcher = new IndexSearcher(reader); - 
searcher.setSimilarity(engineConfig.getSimilarity()); + IndexSearcher searcher = super.newSearcher(reader, previousReader); if (warmer != null) { // we need to pass a custom searcher that does not release anything on Engine.Search Release, // we will release explicitly @@ -1110,7 +1109,8 @@ public class InternalEngine extends Engine { } if (!readers.isEmpty()) { // we don't want to close the inner readers, just increase ref on them - newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false)); + IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false); + newSearcher = super.newSearcher(newReader, null); closeNewSearcher = true; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index b267bf3978c..fa459d36b13 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; + import org.apache.lucene.document.Field; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; @@ -43,7 +44,19 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.Mapping.SourceTransform; -import org.elasticsearch.index.mapper.internal.*; +import org.elasticsearch.index.mapper.internal.AllFieldMapper; +import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.internal.IdFieldMapper; +import org.elasticsearch.index.mapper.internal.IndexFieldMapper; +import 
org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.index.mapper.internal.SizeFieldMapper; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TTLFieldMapper; +import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.script.ExecutableScript; @@ -54,7 +67,12 @@ import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; /** @@ -343,7 +361,7 @@ public class DocumentMapper implements ToXContent { continue; } - Filter filter = sc.filterCache().cache(objectMapper.nestedTypeFilter(), null, sc.queryParserService().autoFilterCachePolicy()); + Filter filter = objectMapper.nestedTypeFilter(); if (filter == null) { continue; } diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 7d242c953c0..6fe69c809ff 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; +import 
org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; @@ -371,11 +372,11 @@ public class MapperService extends AbstractIndexComponent { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); bq.add(Queries.newNonNestedFilter(), Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else if (hasNested) { return Queries.newNonNestedFilter(); } else if (filterPercolateType) { - return Queries.wrap(Queries.not(percolatorType)); + return new QueryWrapperFilter(Queries.not(percolatorType)); } else { return null; } @@ -384,12 +385,12 @@ public class MapperService extends AbstractIndexComponent { // since they have different types (starting with __) if (types.length == 1) { DocumentMapper docMapper = documentMapper(types[0]); - Filter filter = docMapper != null ? docMapper.typeFilter() : Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, types[0]))); + Filter filter = docMapper != null ? docMapper.typeFilter() : new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, types[0]))); if (filterPercolateType) { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); bq.add(filter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { return filter; } @@ -419,9 +420,9 @@ public class MapperService extends AbstractIndexComponent { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); bq.add(termsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(termsFilter); + return new QueryWrapperFilter(termsFilter); } } else { // Current bool filter requires that at least one should clause matches, even with a must clause. 
@@ -441,7 +442,7 @@ public class MapperService extends AbstractIndexComponent { bool.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST); } - return Queries.wrap(bool); + return new QueryWrapperFilter(bool); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index 7e97c86b4b6..8d004a31a91 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -35,6 +36,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; @@ -480,7 +482,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter termFilter(Object value, @Nullable QueryParseContext context) { - return Queries.wrap(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); } @Override @@ -499,7 +501,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { for (int i = 0; i < bytesRefs.length; i++) { bytesRefs[i] = indexedValueForSearch(values.get(i)); } - return Queries.wrap(new TermsQuery(names.indexName(), bytesRefs)); + return new QueryWrapperFilter(new 
TermsQuery(names.indexName(), bytesRefs)); } } @@ -529,7 +531,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(new TermRangeQuery(names.indexName(), + return new QueryWrapperFilter(new TermRangeQuery(names.indexName(), lowerTerm == null ? null : indexedValueForSearch(lowerTerm), upperTerm == null ? null : indexedValueForSearch(upperTerm), includeLower, includeUpper)); @@ -551,7 +553,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter prefixFilter(Object value, @Nullable QueryParseContext context) { - return Queries.wrap(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); + return new QueryWrapperFilter(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); } @Override @@ -565,7 +567,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter regexpFilter(Object value, int flags, int maxDeterminizedStates, @Nullable QueryParseContext parseContext) { - return Queries.wrap(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates)); + return new QueryWrapperFilter(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates)); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index 7df9d2f179f..d01ff743279 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -24,13 +24,13 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedNumericDocValuesField; import 
org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -205,7 +205,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE))); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index fc91a8ac061..37a6f0a33b5 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -34,7 +35,6 @@ import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -212,7 +212,7 @@ public class ByteFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValueAsInt(lowerTerm), upperTerm == null ? null : parseValueAsInt(upperTerm), includeLower, includeUpper)); @@ -231,7 +231,7 @@ public class ByteFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue.intValue(), nullValue.intValue(), true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 38309d729c6..5ab0049178b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -39,8 +40,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; -import 
org.elasticsearch.common.lucene.search.NoCacheQuery; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.ResolvableFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -392,7 +391,7 @@ public class DateFieldMapper extends NumberFieldMapper { if (fieldData != null) { filter = NumericRangeFieldDataFilter.newLongRange(fieldData, lowerVal,upperVal, includeLower, includeUpper); } else { - filter = Queries.wrap(NumericRangeQuery.newLongRange( + filter = new QueryWrapperFilter(NumericRangeQuery.newLongRange( names.indexName(), precisionStep, lowerVal, upperVal, includeLower, includeUpper )); } @@ -406,7 +405,7 @@ public class DateFieldMapper extends NumberFieldMapper { return null; } long value = parseStringValue(nullValue); - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, value, value, true, true)); @@ -588,7 +587,7 @@ public class DateFieldMapper extends NumberFieldMapper { } } - public final class LateParsingQuery extends NoCacheQuery { + public final class LateParsingQuery extends Query { final Object lowerTerm; final Object upperTerm; @@ -613,7 +612,7 @@ public class DateFieldMapper extends NumberFieldMapper { } @Override - public String innerToString(String s) { + public String toString(String s) { final StringBuilder sb = new StringBuilder(); return sb.append(names.indexName()).append(':') .append(includeLower ? 
'[' : '{') diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index d3802650074..e7ee19e3dba 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -38,7 +39,6 @@ import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.ByteUtils; @@ -202,14 +202,14 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseDoubleValue(lowerTerm), upperTerm == null ? 
null : parseDoubleValue(upperTerm), includeLower, includeUpper)); } public Filter rangeFilter(Double lowerTerm, Double upperTerm, boolean includeLower, boolean includeUpper) { - return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper)); + return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper)); } @Override @@ -225,7 +225,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index ad5faa81c4e..968261805d9 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -39,7 +40,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.ByteUtils; @@ -212,7 +212,7 @@ public class FloatFieldMapper extends 
NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper)); @@ -231,7 +231,7 @@ public class FloatFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 647e7c3b5d1..8fc32539120 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -206,7 +206,7 @@ public class 
IntegerFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper)); @@ -225,7 +225,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index a600769749a..bca94be2d34 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -196,7 +196,7 @@ public 
class LongFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseLongValue(lowerTerm), upperTerm == null ? null : parseLongValue(upperTerm), includeLower, includeUpper)); @@ -215,7 +215,7 @@ public class LongFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index b0d15fac59d..a3a905aed99 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -212,7 
+212,7 @@ public class ShortFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValueAsInt(lowerTerm), upperTerm == null ? null : parseValueAsInt(upperTerm), includeLower, includeUpper)); @@ -220,7 +220,7 @@ public class ShortFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(QueryParseContext parseContext, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this), + return new QueryWrapperFilter(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? 
null : parseValue(upperTerm), includeLower, includeUpper)); @@ -231,7 +231,7 @@ public class ShortFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue.intValue(), nullValue.intValue(), true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index efa7fe36c0b..c9a38f18f9c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; @@ -41,7 +42,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -200,7 +200,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern if (fieldType.indexOptions() != IndexOptions.NONE || context == null) { return super.termFilter(value, context); } - return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value))); + return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value))); } 
@Override @@ -208,7 +208,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern if (fieldType.indexOptions() != IndexOptions.NONE || context == null) { return super.termsFilter(values, context); } - return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values))); + return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values))); } @Override @@ -238,7 +238,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern for (String queryType : queryTypes) { filter.add(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value)))), BooleanClause.Occur.SHOULD); } - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } @Override @@ -277,7 +277,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern filter.add(new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags, maxDeterminizedStates), BooleanClause.Occur.SHOULD); } - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index f1eca621076..d6a14f7be3c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; @@ -275,7 +276,7 @@ public class ParentFieldMapper extends 
AbstractFieldMapper implements Inter } BytesRef bValue = BytesRefs.toBytesRef(value); if (Uid.hasDelimiter(bValue)) { - return Queries.wrap(new TermQuery(new Term(names.indexName(), bValue))); + return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), bValue))); } List types = new ArrayList<>(context.mapperService().types().size()); @@ -288,14 +289,14 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter if (types.isEmpty()) { return Queries.newMatchNoDocsFilter(); } else if (types.size() == 1) { - return Queries.wrap(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)))); + return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)))); } else { // we use all non child types, cause we don't know if its exact or not... List typesValues = new ArrayList<>(types.size()); for (String type : context.mapperService().types()) { typesValues.add(Uid.createUidAsBytes(type, bValue)); } - return Queries.wrap(new TermsQuery(names.indexName(), typesValues)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), typesValues)); } } @@ -328,7 +329,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } } } - return Queries.wrap(new TermsQuery(names.indexName(), bValues)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), bValues)); } /** diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index 83cdbf536b2..b5668761c03 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -28,13 +28,13 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import 
org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.FieldDataType; @@ -133,15 +133,15 @@ public class TypeFieldMapper extends AbstractFieldMapper implements Inte @Override public Query termQuery(Object value, @Nullable QueryParseContext context) { - return new ConstantScoreQuery(context.cacheFilter(termFilter(value, context), null, context.autoFilterCachePolicy())); + return new ConstantScoreQuery(termFilter(value, context)); } @Override public Filter termFilter(Object value, @Nullable QueryParseContext context) { if (fieldType.indexOptions() == IndexOptions.NONE) { - return Queries.wrap(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))))); + return new QueryWrapperFilter(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))))); } - return Queries.wrap(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value)))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value)))); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 9f69e83edfe..64ba3bace25 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import 
org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -35,7 +36,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -253,7 +253,7 @@ public class IpFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? 
null : parseValue(upperTerm), includeLower, includeUpper)); @@ -273,7 +273,7 @@ public class IpFieldMapper extends NumberFieldMapper { return null; } final long value = ipToLong(nullValue); - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, value, value, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 9d591156b99..8004bb38e7c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Iterables; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; @@ -29,7 +30,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -367,7 +367,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } this.nestedTypePathAsString = "__" + fullPath; this.nestedTypePathAsBytes = new BytesRef(nestedTypePathAsString); - this.nestedTypeFilter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes))); + this.nestedTypeFilter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes))); } @Override diff 
--git a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index fd4cce1c763..0bde0193768 100644 --- a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.percolator; import org.apache.lucene.index.Term; -import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -28,7 +27,6 @@ import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -93,7 +91,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple private CloseableThreadLocal cache = new CloseableThreadLocal() { @Override protected QueryParseContext initialValue() { - return new QueryParseContext(shardId.index(), queryParserService, true); + return new QueryParseContext(shardId.index(), queryParserService); } }; @@ -280,13 +278,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple shard.refresh("percolator_load_queries"); // Maybe add a mode load? This isn't really a write. 
We need write b/c state=post_recovery try (Engine.Searcher searcher = shard.acquireSearcher("percolator_load_queries", true)) { - Query query = new ConstantScoreQuery( - indexCache.filter().cache( - Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))), - null, - queryParserService.autoFilterCachePolicy() - ) - ); + Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME)); QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService); searcher.searcher().search(query, queryCollector); Map queries = queryCollector.queries(); diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java index c69f7c8ef0f..87611a03b73 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java @@ -34,9 +34,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { private ArrayList filters = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public AndFilterBuilder(FilterBuilder... filters) { @@ -53,19 +50,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public AndFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public AndFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. 
*/ @@ -82,12 +66,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { filter.toXContent(builder, params); } builder.endArray(); - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java index 02322db9a0b..f0c8c2724bb 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -56,9 +54,6 @@ public class AndFilterParser implements FilterParser { ArrayList filters = newArrayList(); boolean filtersFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token = parser.currentToken(); @@ -74,6 +69,8 @@ public class AndFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { if ("filters".equals(currentFieldName)) { filtersFound = true; @@ -93,12 
+90,8 @@ public class AndFilterParser implements FilterParser { } } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[and] filter does not support [" + currentFieldName + "]"); } @@ -120,10 +113,7 @@ public class AndFilterParser implements FilterParser { for (Filter filter : filters) { boolQuery.add(filter, Occur.MUST); } - Filter filter = Queries.wrap(boolQuery); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java index f4982f12f69..330adaf8d08 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java @@ -36,9 +36,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { private ArrayList shouldClauses = new ArrayList<>(); - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -114,19 +111,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public BoolFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public BoolFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("bool"); @@ -137,12 +121,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java index 71f8b8248f7..7d96f1bc3bc 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -52,9 +50,6 @@ public class BoolFilterParser implements FilterParser { BooleanQuery boolFilter = new BooleanQuery(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token; @@ -64,6 +59,8 @@ public class BoolFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) 
{ currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("must".equals(currentFieldName)) { hasAnyFilter = true; @@ -117,12 +114,8 @@ public class BoolFilterParser implements FilterParser { throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } @@ -138,10 +131,7 @@ public class BoolFilterParser implements FilterParser { return null; } - Filter filter = Queries.wrap(boolFilter); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java index d89ff05b7fa..593643abc52 100644 --- a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import 
org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -55,14 +53,14 @@ public class ConstantScoreQueryParser implements QueryParser { Query query = null; boolean queryFound = false; float boost = 1.0f; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String currentFieldName = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { filter = parseContext.parseInnerFilter(); @@ -76,10 +74,6 @@ public class ConstantScoreQueryParser implements QueryParser { } else if (token.isValue()) { if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[constant_score] query does not support [" + currentFieldName + "]"); } @@ -94,11 +88,6 @@ public class ConstantScoreQueryParser implements QueryParser { } if (filter != null) { - // cache the filter if possible needed - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - Query query1 = new ConstantScoreQuery(filter); query1.setBoost(boost); return query1; diff --git a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java index 008f554a57f..8dc0a3eb2c1 100644 --- a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java +++ 
b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -120,11 +121,7 @@ public class ExistsFilterParser implements FilterParser { boolFilter.add(filter, BooleanClause.Occur.SHOULD); } - Filter filter = Queries.wrap(boolFilter); - // we always cache this one, really does not change... (exists) - // its ok to cache under the fieldName cacheKey, since its per segment and the mapping applies to this data on this segment... - filter = parseContext.cacheFilter(filter, new HashedBytesRef("$exists$" + fieldPattern), parseContext.autoFilterCachePolicy()); - + Filter filter = new QueryWrapperFilter(boolFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java index d31e2f1a943..b349dd3e65c 100644 --- a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java @@ -21,10 +21,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -52,8 +50,6 @@ public class FQueryFilterParser implements FilterParser { 
Query query = null; boolean queryFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -61,6 +57,8 @@ public class FQueryFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { queryFound = true; @@ -71,10 +69,6 @@ public class FQueryFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.autoFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[fquery] filter does not support [" + currentFieldName + "]"); } @@ -86,10 +80,7 @@ public class FQueryFilterParser implements FilterParser { if (query == null) { return null; } - Filter filter = Queries.wrap(query, parseContext); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(query); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java index 9b3a89976c5..77eb4d136ca 100644 --- a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentType; diff --git a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java index f6ec14313b1..9e2f8e133b3 100644 --- a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java @@ -23,9 +23,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; @@ -55,8 +53,6 @@ public class FilteredQueryParser implements QueryParser { Filter filter = null; boolean filterFound = false; float boost = 1.0f; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String queryName = null; String currentFieldName = null; @@ -66,6 +62,8 @@ public class FilteredQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { query = parseContext.parseInnerQuery(); @@ -99,10 +97,6 @@ public class FilteredQueryParser implements QueryParser { queryName = parser.text(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - 
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[filtered] query does not support [" + currentFieldName + "]"); } @@ -129,11 +123,6 @@ public class FilteredQueryParser implements QueryParser { return query; } - // cache if required - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - // if its a match_all query, use constant_score if (Queries.isConstantMatchAllQuery(query)) { Query q = new ConstantScoreQuery(filter); diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java index 5efedc86c1e..40b41c7ffbd 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java @@ -42,9 +42,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { private double[] box = {Double.NaN, Double.NaN, Double.NaN, Double.NaN}; - private Boolean cache; - private String cacheKey; - private String filterName; private String type; @@ -140,19 +137,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoBoundingBoxFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoBoundingBoxFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the type of executing of the geo bounding box. Can be either `memory` or `indexed`. Defaults * to `memory`. 
@@ -185,12 +169,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (type != null) { builder.field("type", type); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java index 107e3a507dd..6441b8d4b07 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java @@ -20,12 +20,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.mapper.FieldMapper; @@ -72,8 +70,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; double top = Double.NaN; @@ -100,7 +96,9 @@ public class GeoBoundingBoxFilterParser implements FilterParser { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); token = parser.nextToken(); - if (FIELD.equals(currentFieldName)) { + if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if 
(FIELD.equals(currentFieldName)) { fieldName = parser.text(); } else if (TOP.equals(currentFieldName)) { top = parser.doubleValue(); @@ -138,10 +136,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("normalize".equals(currentFieldName)) { normalize = parser.booleanValue(); } else if ("type".equals(currentFieldName)) { @@ -188,9 +182,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { + "] not supported, either 'indexed' or 'memory' are allowed"); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java index 6a07c285c1a..a45aee92c6c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java @@ -45,9 +45,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { private String optimizeBbox; - private Boolean cache; - private String cacheKey; - private String filterName; public GeoDistanceFilterBuilder(String name) { @@ -103,19 +100,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public GeoDistanceFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoDistanceFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoDistanceFilterParser.NAME); @@ -134,12 +118,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java index a7859977388..ff46d591f3c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java @@ -20,13 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -64,8 +62,6 @@ public class GeoDistanceFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; GeoPoint point = new GeoPoint(); @@ -80,6 +76,8 @@ public class 
GeoDistanceFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { fieldName = currentFieldName; GeoUtils.parseGeoPoint(parser, point); @@ -125,10 +123,6 @@ public class GeoDistanceFilterParser implements FilterParser { fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length()); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) { optimizeBbox = parser.textOrNull(); } else if ("normalize".equals(currentFieldName)) { @@ -167,9 +161,6 @@ public class GeoDistanceFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoDistanceFilter(point.lat(), point.lon(), distance, geoDistance, indexFieldData, geoMapper, optimizeBbox); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java index 343e50e3efb..c21cd3d62d7 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java @@ -45,9 +45,6 @@ public class 
GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { private GeoDistance geoDistance; - private Boolean cache; - private String cacheKey; - private String filterName; private String optimizeBbox; @@ -139,19 +136,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoDistanceRangeFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoDistanceRangeFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoDistanceRangeFilterParser.NAME); @@ -173,12 +157,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java index 113c59d2c83..9322a230c01 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java @@ -20,13 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; 
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -64,8 +62,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; GeoPoint point = new GeoPoint(); @@ -82,6 +78,8 @@ public class GeoDistanceRangeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { GeoUtils.parseGeoPoint(parser, point); fieldName = currentFieldName; @@ -155,10 +153,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser { fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length()); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) { optimizeBbox = parser.textOrNull(); } else if ("normalize".equals(currentFieldName)) { @@ -206,9 +200,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoDistanceRangeFilter(point, from, to, includeLower, includeUpper, geoDistance, geoMapper, indexFieldData, optimizeBbox); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, 
filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java index e32a1e58e1b..fd0a2f569c4 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.google.common.collect.Lists; + import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -38,9 +39,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder { private final List shell = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public GeoPolygonFilterBuilder(String name) { @@ -75,19 +73,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public GeoPolygonFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoPolygonFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoPolygonFilterParser.NAME); @@ -103,12 +88,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java index e63c6012ede..f3f41ac7126 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java @@ -22,11 +22,9 @@ package org.elasticsearch.index.query; import com.google.common.collect.Lists; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -68,8 +66,6 @@ public class GeoPolygonFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; List shell = Lists.newArrayList(); @@ -84,6 +80,8 @@ public 
class GeoPolygonFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; @@ -107,10 +105,6 @@ public class GeoPolygonFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("normalize".equals(currentFieldName)) { normalizeLat = parser.booleanValue(); normalizeLon = parser.booleanValue(); @@ -154,9 +148,6 @@ public class GeoPolygonFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoPolygonFilter(indexFieldData, shell.toArray(new GeoPoint[shell.size()])); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java index 4ff26d7aacd..1ac7b14481c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java @@ -37,9 +37,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder { private SpatialStrategy strategy = null; - private Boolean cache; - private String cacheKey; - private String filterName; private final String indexedShapeId; @@ -93,28 +90,6 @@ public class GeoShapeFilterBuilder 
extends BaseFilterBuilder { this.indexedShapeType = indexedShapeType; } - /** - * Sets whether the filter will be cached. - * - * @param cache Whether filter will be cached - * @return this - */ - public GeoShapeFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - /** - * Sets the key used for the filter if it is cached - * - * @param cacheKey Key for the Filter if cached - * @return this - */ - public GeoShapeFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the name of the filter * @@ -205,12 +180,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder { if (name != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java index 5a5e45736cd..bba22882d27 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java @@ -24,15 +24,13 @@ import com.spatial4j.core.shape.Shape; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.internal.Nullable; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -85,8 +83,6 @@ public class GeoShapeFilterParser implements FilterParser { ShapeRelation shapeRelation = ShapeRelation.INTERSECTS; String strategyName = null; ShapeBuilder shape = null; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String id = null; @@ -100,6 +96,8 @@ public class GeoShapeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; @@ -147,10 +145,6 @@ public class GeoShapeFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[geo_shape] filter does not support [" + currentFieldName + "]"); } @@ -189,15 +183,11 @@ public class GeoShapeFilterParser implements FilterParser { Filter intersects = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, ShapeRelation.INTERSECTS)); bool.add(exists, BooleanClause.Occur.MUST); bool.add(intersects, BooleanClause.Occur.MUST_NOT); - filter = Queries.wrap(bool); + filter = new QueryWrapperFilter(bool); } else { filter = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, shapeRelation)); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - if (filterName != null) { parseContext.addNamedFilter(filterName, 
filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java index 63ca22db644..e390bf53ea7 100644 --- a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java +++ b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -28,7 +27,6 @@ import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -62,8 +60,6 @@ public class GeohashCellFilter { public static final String NAME = "geohash_cell"; public static final String NEIGHBORS = "neighbors"; public static final String PRECISION = "precision"; - public static final String CACHE = "_cache"; - public static final String CACHE_KEY = "_cache_key"; /** * Create a new geohash filter for a given set of geohashes. In general this method @@ -103,8 +99,6 @@ public class GeohashCellFilter { private String geohash; private int levels = -1; private boolean neighbors; - private Boolean cache; - private String cacheKey; public Builder(String field) { @@ -161,19 +155,6 @@ public class GeohashCellFilter { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public Builder cache(boolean cache) { - this.cache = cache; - return this; - } - - public Builder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); @@ -183,12 +164,6 @@ public class GeohashCellFilter { if(levels > 0) { builder.field(PRECISION, levels); } - if (cache != null) { - builder.field(CACHE, cache); - } - if (cacheKey != null) { - builder.field(CACHE_KEY, cacheKey); - } builder.field(field, geohash); builder.endObject(); @@ -214,8 +189,6 @@ public class GeohashCellFilter { String geohash = null; int levels = -1; boolean neighbors = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; XContentParser.Token token; @@ -227,7 +200,9 @@ public class GeohashCellFilter { if (token == Token.FIELD_NAME) { String field = parser.text(); - if (PRECISION.equals(field)) { + if (parseContext.isDeprecatedSetting(field)) { + // skip + } else if (PRECISION.equals(field)) { token = parser.nextToken(); if(token == Token.VALUE_NUMBER) { levels = parser.intValue(); @@ -238,12 +213,6 @@ public class GeohashCellFilter { } else if (NEIGHBORS.equals(field)) { parser.nextToken(); neighbors = parser.booleanValue(); - } else if (CACHE.equals(field)) { - parser.nextToken(); - cache = parseContext.parseFilterCachePolicy(); - } else if (CACHE_KEY.equals(field)) { - parser.nextToken(); - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = field; token = parser.nextToken(); @@ -296,10 +265,6 @@ public class GeohashCellFilter { filter = create(parseContext, geoMapper, geohash, null); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - return filter; } } diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java index 
d22a05f6a11..8bf761b84a0 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -34,7 +35,6 @@ import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery; import org.elasticsearch.index.search.child.ChildrenQuery; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -80,6 +80,8 @@ public class HasChildFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // Usually, the query would be parsed here, but the child // type may not have been extracted yet, so use the @@ -101,10 +103,6 @@ public class HasChildFilterParser implements FilterParser { childType = parser.text(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - // noop to be backwards compatible - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - // noop to be backwards compatible } else if 
("short_circuit_cutoff".equals(currentFieldName)) { shortCircuitParentDocSet = parser.intValue(); } else if ("min_children".equals(currentFieldName) || "minChildren".equals(currentFieldName)) { @@ -150,7 +148,7 @@ public class HasChildFilterParser implements FilterParser { String parentType = parentFieldMapper.type(); // wrap the query with type query - query = new FilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + query = new FilteredQuery(query, childDocMapper.typeFilter()); DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { @@ -167,7 +165,7 @@ public class HasChildFilterParser implements FilterParser { nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } - Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()); + Filter parentFilter = parentDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); Query childrenQuery; @@ -178,9 +176,9 @@ public class HasChildFilterParser implements FilterParser { shortCircuitParentDocSet, nonNestedDocsFilter); } if (filterName != null) { - parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(childrenQuery)); + parseContext.addNamedFilter(filterName, new QueryWrapperFilter(childrenQuery)); } - return new CustomQueryWrappingFilter(childrenQuery); + return new QueryWrapperFilter(childrenQuery); } } diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java index e088b58a51a..2e55395535c 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java @@ -35,7 +35,6 @@ import 
org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery; import org.elasticsearch.index.search.child.ChildrenQuery; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -166,10 +165,10 @@ public class HasChildQueryParser implements QueryParser { } // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + innerQuery = new FilteredQuery(innerQuery, childDocMapper.typeFilter()); Query query; - Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()); + Filter parentFilter = parentDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); if (minChildren > 1 || maxChildren > 0 || scoreType != ScoreType.NONE) { query = new ChildrenQuery(parentChildIndexFieldData, parentType, childType, parentFilter, innerQuery, scoreType, minChildren, @@ -179,7 +178,7 @@ public class HasChildQueryParser implements QueryParser { shortCircuitParentDocSet, nonNestedDocsFilter); } if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedQuery(queryName, query); } query.setBoost(boost); return query; diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java index 62a96debf8c..8f565022c4f 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java +++ 
b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java @@ -62,20 +62,6 @@ public class HasParentFilterBuilder extends BaseFilterBuilder { return this; } - /** - * This is a noop since has_parent can't be cached. - */ - public HasParentFilterBuilder cache(boolean cache) { - return this; - } - - /** - * This is a noop since has_parent can't be cached. - */ - public HasParentFilterBuilder cacheKey(String cacheKey) { - return this; - } - /** * Sets inner hit definition in the scope of this filter and reusing the defined type and query. */ diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java index 388f24d4ab0..331f575df77 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java @@ -20,13 +20,13 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; @@ -69,6 +69,8 @@ public class HasParentFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // Usually, the query would be 
parsed here, but the child // type may not have been extracted yet, so use the @@ -90,10 +92,6 @@ public class HasParentFilterParser implements FilterParser { parentType = parser.text(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - // noop to be backwards compatible - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - // noop to be backwards compatible } else { throw new QueryParsingException(parseContext, "[has_parent] filter does not support [" + currentFieldName + "]"); } @@ -122,9 +120,9 @@ public class HasParentFilterParser implements FilterParser { return null; } if (filterName != null) { - parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentQuery)); + parseContext.addNamedFilter(filterName, new QueryWrapperFilter(parentQuery)); } - return new CustomQueryWrappingFilter(parentQuery); + return new QueryWrapperFilter(parentQuery); } } diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java index d7d57b6ddd6..0d718efdb69 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -33,7 +34,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; 
-import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ParentConstantScoreQuery; import org.elasticsearch.index.search.child.ParentQuery; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -136,7 +136,7 @@ public class HasParentQueryParser implements QueryParser { query.setBoost(boost); if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedQuery(queryName, query); } return query; } @@ -186,7 +186,7 @@ public class HasParentQueryParser implements QueryParser { parentsFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD); } } - parentFilter = Queries.wrap(parentsFilter); + parentFilter = new QueryWrapperFilter(parentsFilter); } if (parentFilter == null) { @@ -194,8 +194,8 @@ public class HasParentQueryParser implements QueryParser { } // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); - Filter childrenFilter = parseContext.cacheFilter(Queries.wrap(Queries.not(parentFilter)), null, parseContext.autoFilterCachePolicy()); + innerQuery = new FilteredQuery(innerQuery, parentDocMapper.typeFilter()); + Filter childrenFilter = new QueryWrapperFilter(Queries.not(parentFilter)); if (score) { return new ParentQuery(parentChildIndexFieldData, innerQuery, parentDocMapper.type(), childrenFilter); } else { diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java index 138557cd79a..23d4c9c1483 100644 --- a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java @@ -24,6 +24,7 @@ import com.google.common.collect.Iterables; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; +import 
org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; @@ -109,7 +110,7 @@ public class IdsFilterParser implements FilterParser { types = parseContext.mapperService().types(); } - Filter filter = Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids))); + Filter filter = new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids))); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java index e2bcd353e11..4d04e8e675b 100644 --- a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java +++ b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -94,8 +93,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { final BitsetFilterCache bitsetFilterCache; - final QueryCachingPolicy autoFilterCachePolicy; - private final Map queryParsers; private final Map filterParsers; @@ -111,7 +108,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { ScriptService scriptService, AnalysisService analysisService, MapperService mapperService, IndexCache indexCache, IndexFieldDataService fieldDataService, BitsetFilterCache bitsetFilterCache, - QueryCachingPolicy autoFilterCachePolicy, @Nullable SimilarityService similarityService, @Nullable Map namedQueryParsers, @Nullable Map namedFilterParsers) { @@ -123,7 +119,6 @@ 
public class IndexQueryParserService extends AbstractIndexComponent { this.indexCache = indexCache; this.fieldDataService = fieldDataService; this.bitsetFilterCache = bitsetFilterCache; - this.autoFilterCachePolicy = autoFilterCachePolicy; this.defaultField = indexSettings.get(DEFAULT_FIELD, AllFieldMapper.NAME); this.queryStringLenient = indexSettings.getAsBoolean(QUERY_STRING_LENIENT, false); @@ -185,10 +180,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { return this.defaultField; } - public QueryCachingPolicy autoFilterCachePolicy() { - return autoFilterCachePolicy; - } - public boolean queryStringLenient() { return this.queryStringLenient; } diff --git a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java index 3f394ff735e..07af9717dc1 100644 --- a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -143,13 +144,8 @@ public class MissingFilterParser implements FilterParser { boolFilter.add(filter, BooleanClause.Occur.SHOULD); } - // we always cache this one, really does not change... (exists) - // its ok to cache under the fieldName cacheKey, since its per segment and the mapping applies to this data on this segment... 
- existenceFilter = Queries.wrap(boolFilter); - existenceFilter = parseContext.cacheFilter(existenceFilter, new HashedBytesRef("$exists$" + fieldPattern), parseContext.autoFilterCachePolicy()); - existenceFilter = Queries.wrap(Queries.not(existenceFilter)); - // cache the not filter as well, so it will be faster - existenceFilter = parseContext.cacheFilter(existenceFilter, new HashedBytesRef("$missing$" + fieldPattern), parseContext.autoFilterCachePolicy()); + existenceFilter = new QueryWrapperFilter(boolFilter); + existenceFilter = new QueryWrapperFilter(Queries.not(existenceFilter)); } if (nullValue) { @@ -157,10 +153,6 @@ public class MissingFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(field); if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) { nullFilter = smartNameFieldMappers.mapper().nullValueFilter(); - if (nullFilter != null) { - // cache the not filter as well, so it will be faster - nullFilter = parseContext.cacheFilter(nullFilter, new HashedBytesRef("$null$" + fieldPattern), parseContext.autoFilterCachePolicy()); - } } } } @@ -172,7 +164,7 @@ public class MissingFilterParser implements FilterParser { combined.add(existenceFilter, BooleanClause.Occur.SHOULD); combined.add(nullFilter, BooleanClause.Occur.SHOULD); // cache the not filter as well, so it will be faster - filter = parseContext.cacheFilter(Queries.wrap(combined), null, parseContext.autoFilterCachePolicy()); + filter = new QueryWrapperFilter(combined); } else { filter = nullFilter; } diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java index 0b15bbfcf14..c64c7ef327e 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java @@ -32,8 +32,6 @@ public class NestedFilterBuilder extends 
BaseFilterBuilder { private final String path; private Boolean join; - private Boolean cache; - private String cacheKey; private String filterName; private QueryInnerHitBuilder innerHit = null; @@ -55,19 +53,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public NestedFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public NestedFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. */ @@ -101,12 +86,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (innerHit != null) { builder.startObject("inner_hits"); builder.value(innerHit); diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java index fc2237d6630..467705afca7 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java @@ -20,12 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; @@ -53,8 +52,6 @@ public class 
NestedFilterParser implements FilterParser { final NestedQueryParser.ToBlockJoinQueryBuilder builder = new NestedQueryParser.ToBlockJoinQueryBuilder(parseContext); float boost = 1.0f; - boolean cache = false; - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -62,6 +59,8 @@ public class NestedFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { builder.query(); @@ -79,10 +78,6 @@ public class NestedFilterParser implements FilterParser { boost = parser.floatValue(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parser.booleanValue(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[nested] filter does not support [" + currentFieldName + "]"); } @@ -92,10 +87,7 @@ public class NestedFilterParser implements FilterParser { ToParentBlockJoinQuery joinQuery = builder.build(); if (joinQuery != null) { joinQuery.getChildQuery().setBoost(boost); - Filter nestedFilter = Queries.wrap(joinQuery, parseContext); - if (cache) { - nestedFilter = parseContext.cacheFilter(nestedFilter, cacheKey, parseContext.autoFilterCachePolicy()); - } + Filter nestedFilter = new QueryWrapperFilter(joinQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, nestedFilter); } diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java index f835b025f9c..1c5ea06f077 100644 --- 
a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java @@ -32,22 +32,12 @@ public class NotFilterBuilder extends BaseFilterBuilder { private FilterBuilder filter; - private Boolean cache; - private String filterName; public NotFilterBuilder(FilterBuilder filter) { this.filter = filter; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public NotFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - public NotFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; @@ -58,9 +48,6 @@ public class NotFilterBuilder extends BaseFilterBuilder { builder.startObject(NotFilterParser.NAME); builder.field("filter"); filter.toXContent(builder, params); - if (cache != null) { - builder.field("_cache", cache); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java index 38bff1997bb..7ebf0fe82dc 100644 --- a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java @@ -20,8 +20,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; @@ -49,8 +49,6 @@ public class NotFilterParser implements FilterParser { Filter filter = null; boolean filterFound = false; - boolean cache = false; - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -58,6 +56,8 @@ public class NotFilterParser implements FilterParser { while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { filter = parseContext.parseInnerFilter(); @@ -72,12 +72,8 @@ public class NotFilterParser implements FilterParser { // its the filter, and the name is the field filter = parseContext.parseInnerFilter(currentFieldName); } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parser.booleanValue(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[not] filter does not support [" + currentFieldName + "]"); } @@ -92,10 +88,7 @@ public class NotFilterParser implements FilterParser { return null; } - Filter notFilter = Queries.wrap(Queries.not(filter)); - if (cache) { - notFilter = parseContext.cacheFilter(notFilter, cacheKey, parseContext.autoFilterCachePolicy()); - } + Filter notFilter = new QueryWrapperFilter(Queries.not(filter)); if (filterName != null) { parseContext.addNamedFilter(filterName, notFilter); } diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java index 04d516b00b7..2feca66fc56 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java @@ -34,9 +34,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { private ArrayList filters = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public OrFilterBuilder(FilterBuilder... 
filters) { @@ -53,19 +50,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public OrFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public OrFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - public OrFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; @@ -79,12 +63,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { filter.toXContent(builder, params); } builder.endArray(); - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java index 22932ac8290..dae526563c6 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -56,9 +54,6 @@ public class OrFilterParser implements FilterParser { ArrayList filters = newArrayList(); boolean filtersFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token = 
parser.currentToken(); @@ -93,12 +88,8 @@ public class OrFilterParser implements FilterParser { } } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[or] filter does not support [" + currentFieldName + "]"); } @@ -119,10 +110,7 @@ public class OrFilterParser implements FilterParser { for (Filter filter : filters) { boolQuery.add(filter, Occur.SHOULD); } - Filter filter = Queries.wrap(boolQuery); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java index bb41e4f104a..d202fcc281f 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java @@ -35,9 +35,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { private final String prefix; - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -60,19 +57,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public PrefixFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public PrefixFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(PrefixFilterParser.NAME); @@ -80,12 +64,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java index c6bf3fe0a95..d1f291e4606 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java @@ -22,11 +22,9 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; import org.apache.lucene.search.PrefixQuery; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -52,8 +50,6 @@ public class PrefixFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object value = null; @@ -66,10 +62,6 
@@ public class PrefixFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = currentFieldName; value = parser.objectBytes(); @@ -88,12 +80,9 @@ public class PrefixFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().prefixFilter(value, parseContext); } if (filter == null) { - filter = Queries.wrap(new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); + filter = new QueryWrapperFilter(new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java index cf5db0f0ac1..c4af2419b5c 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java @@ -32,8 +32,6 @@ public class QueryFilterBuilder extends BaseFilterBuilder { private final QueryBuilder queryBuilder; - private Boolean cache; - private String filterName; /** @@ -53,17 +51,9 @@ public class QueryFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public QueryFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { - if (filterName == null && cache == null) { + if (filterName == null) { builder.field(QueryFilterParser.NAME); queryBuilder.toXContent(builder, params); } else { @@ -73,9 +63,6 @@ public class QueryFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java index aaa5a9d1e99..bdc09dbee78 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java @@ -21,8 +21,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import java.io.IOException; @@ -48,6 +48,6 @@ public class QueryFilterParser implements FilterParser { if (query == null) { return null; } - return Queries.wrap(query, parseContext); + return new QueryWrapperFilter(query); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 39c0543759b..aaf247e90cc 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -22,25 +22,17 @@ package org.elasticsearch.index.query; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.apache.lucene.index.LeafReaderContext; 
import org.apache.lucene.queryparser.classic.MapperQueryParser; import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.Bits; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.common.lucene.search.NoCacheQuery; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.lucene.search.ResolvableFilter; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; @@ -55,7 +47,6 @@ import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.query.support.NestedScope; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -75,6 +66,9 @@ import java.util.Map; */ public class QueryParseContext { + private static final ParseField CACHE = new ParseField("_cache").withAllDeprecated("Elasticsearch makes its own caching decisions"); + private static final ParseField CACHE_KEY = new ParseField("_cache_key").withAllDeprecated("Filters are always used as cache keys"); + private 
static ThreadLocal typesContext = new ThreadLocal<>(); public static void setTypes(String[] types) { @@ -97,10 +91,6 @@ public class QueryParseContext { private final Index index; - private boolean propagateNoCache = false; - - private boolean requireCustomQueryWrappingFilter = false; - private final IndexQueryParserService indexQueryParser; private final Map namedFilters = Maps.newHashMap(); @@ -111,8 +101,6 @@ public class QueryParseContext { private EnumSet parseFlags = ParseField.EMPTY_FLAGS; - private final boolean disableFilterCaching; - private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; @@ -120,14 +108,8 @@ public class QueryParseContext { private NestedScope nestedScope; public QueryParseContext(Index index, IndexQueryParserService indexQueryParser) { - this(index, indexQueryParser, false); - } - - public QueryParseContext(Index index, IndexQueryParserService indexQueryParser, boolean disableFilterCaching) { this.index = index; this.indexQueryParser = indexQueryParser; - this.propagateNoCache = disableFilterCaching; - this.disableFilterCaching = disableFilterCaching; } public void parseFlags(EnumSet parseFlags) { @@ -144,8 +126,6 @@ public class QueryParseContext { this.lookup = null; this.parser = jp; this.namedFilters.clear(); - this.requireCustomQueryWrappingFilter = false; - this.propagateNoCache = false; this.nestedScope = new NestedScope(); } @@ -190,24 +170,6 @@ public class QueryParseContext { return indexQueryParser.defaultField(); } - public QueryCachingPolicy autoFilterCachePolicy() { - return indexQueryParser.autoFilterCachePolicy(); - } - - public QueryCachingPolicy parseFilterCachePolicy() throws IOException { - final String text = parser.textOrNull(); - if (text == null || text.equals("auto")) { - return autoFilterCachePolicy(); - } else if (parser.booleanValue()) { - // cache without conditions on how many times the filter has been - // used or what the produced DocIdSet looks like, but ONLY on large - // segments 
to not pollute the cache - return QueryCachingPolicy.CacheOnLargeSegments.DEFAULT; - } else { - return null; - } - } - public boolean queryStringLenient() { return indexQueryParser.queryStringLenient(); } @@ -221,38 +183,6 @@ public class QueryParseContext { return indexQueryParser.bitsetFilterCache.getBitDocIdSetFilter(filter); } - public Filter cacheFilter(Filter filter, final @Nullable HashedBytesRef cacheKey, final QueryCachingPolicy cachePolicy) { - if (filter == null) { - return null; - } - if (this.disableFilterCaching || this.propagateNoCache || filter instanceof NoCacheFilter) { - return filter; - } - if (filter instanceof ResolvableFilter) { - final ResolvableFilter resolvableFilter = (ResolvableFilter) filter; - // We need to wrap it another filter, because this method is invoked at query parse time, which - // may not be during search execution time. (for example index alias filter and percolator) - return new Filter() { - @Override - public DocIdSet getDocIdSet(LeafReaderContext atomicReaderContext, Bits bits) throws IOException { - Filter filter = resolvableFilter.resolve(); - if (filter == null) { - return null; - } - filter = indexQueryParser.indexCache.filter().cache(filter, cacheKey, cachePolicy); - return filter.getDocIdSet(atomicReaderContext, bits); - } - - @Override - public String toString(String field) { - return "AnonymousResolvableFilter"; // TODO: not sure what is going on here - } - }; - } else { - return indexQueryParser.indexCache.filter().cache(filter, cacheKey, cachePolicy); - } - } - public > IFD getForField(FieldMapper mapper) { return indexQueryParser.fieldDataService.getForField(mapper); } @@ -262,7 +192,7 @@ public class QueryParseContext { } public void addNamedQuery(String name, Query query) { - namedFilters.put(name, Queries.wrap(query, this)); + namedFilters.put(name, new QueryWrapperFilter(query)); } public ImmutableMap copyNamedFilters() { @@ -315,16 +245,6 @@ public class QueryParseContext { // if we are at END_OBJECT, 
move to the next one... parser.nextToken(); } - if (result instanceof NoCacheQuery) { - propagateNoCache = true; - } - if (CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(result)) { - requireCustomQueryWrappingFilter = true; - // If later on, either directly or indirectly this query gets wrapped in a query filter it must never - // get cached even if a filter higher up the chain is configured to do this. This will happen, because - // the result filter will be instance of NoCacheFilter (CustomQueryWrappingFilter) which will in - // #executeFilterParser() set propagateNoCache to true. - } return result; } @@ -357,7 +277,7 @@ public class QueryParseContext { if (filterParser == null) { throw new QueryParsingException(this, "No filter registered for [" + filterName + "]"); } - Filter result = executeFilterParser(filterParser); + Filter result = filterParser.parse(this); if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) { // if we are at END_OBJECT, move to the next one... parser.nextToken(); @@ -370,18 +290,7 @@ public class QueryParseContext { if (filterParser == null) { throw new QueryParsingException(this, "No filter registered for [" + filterName + "]"); } - return executeFilterParser(filterParser); - } - - private Filter executeFilterParser(FilterParser filterParser) throws IOException { - final boolean propagateNoCache = this.propagateNoCache; // first safe the state that we need to restore - this.propagateNoCache = false; // parse the subfilter with caching, that's fine - Filter result = filterParser.parse(this); - // now make sure we set propagateNoCache to true if it is true already or if the result is - // an instance of NoCacheFilter or if we used to be true! all filters above will - // be not cached ie. wrappers of this filter! 
- this.propagateNoCache |= (result instanceof NoCacheFilter) || propagateNoCache; - return result; + return filterParser.parse(this); } public FieldMapper fieldMapper(String name) { @@ -475,11 +384,14 @@ public class QueryParseContext { return System.currentTimeMillis(); } - public boolean requireCustomQueryWrappingFilter() { - return requireCustomQueryWrappingFilter; - } - public NestedScope nestedScope() { return nestedScope; } + + /** + * Return whether the setting is deprecated. + */ + public boolean isDeprecatedSetting(String setting) { + return CACHE.match(setting) || CACHE_KEY.match(setting); + } } diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java index 80149821438..42753179b07 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java @@ -41,9 +41,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { private boolean includeUpper = true; - private Boolean cache; - private String cacheKey; - private String filterName; private String execution; @@ -341,19 +338,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to true. - */ - public RangeFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public RangeFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the execution mode that controls how the range filter is executed. Valid values are: "index" and "fielddata". *
    @@ -397,12 +381,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (execution != null) { builder.field("execution", execution); } diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java index 8b5f557d0ba..83650aecd5d 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java @@ -20,14 +20,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -57,8 +55,6 @@ public class RangeFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object from = null; Object to = null; @@ -74,6 +70,8 @@ public class RangeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = 
parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -112,10 +110,6 @@ public class RangeFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("execution".equals(currentFieldName)) { execution = parser.text(); } else { @@ -174,12 +168,9 @@ public class RangeFilterParser implements FilterParser { } if (filter == null) { - filter = Queries.wrap(new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper)); + filter = new QueryWrapperFilter(new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper)); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java index f199f83b5a5..f730c084be3 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java @@ -38,8 +38,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { private int maxDeterminizedStates = Operations.DEFAULT_MAX_DETERMINIZED_STATES; private boolean maxDetermizedStatesSet; - private Boolean cache; - private String cacheKey; private String filterName; /** @@ -87,19 +85,6 @@ public class RegexpFilterBuilder 
extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public RegexpFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public RegexpFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(RegexpFilterParser.NAME); @@ -118,12 +103,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java index 5f1d9174fc7..9612812ffe9 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java @@ -21,13 +21,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -53,8 +51,6 @@ public class RegexpFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = 
parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; String secondaryFieldName = null; Object value = null; @@ -68,6 +64,8 @@ public class RegexpFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -91,10 +89,6 @@ public class RegexpFilterParser implements FilterParser { } else { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { secondaryFieldName = currentFieldName; secondaryValue = parser.objectBytes(); @@ -118,11 +112,7 @@ public class RegexpFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().regexpFilter(value, flagsValue, maxDeterminizedStates, parseContext); } if (filter == null) { - filter = Queries.wrap(new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue, maxDeterminizedStates)); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue, maxDeterminizedStates)); } if (filterName != null) { diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java index 75ffa386198..89f0fe7f033 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java +++ 
b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java @@ -37,9 +37,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { private String lang; - private Boolean cache; - private String cacheKey; - private String filterName; public ScriptFilterBuilder(String script) { @@ -79,19 +76,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public ScriptFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public ScriptFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(ScriptFilterParser.NAME); @@ -105,12 +89,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index 54dbe6cc1db..7e4b59a5748 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -24,11 +24,9 @@ import org.apache.lucene.search.BitsFilteredDocIdSet; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.Bits; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; @@ -41,6 +39,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Map; +import java.util.Objects; import static com.google.common.collect.Maps.newHashMap; @@ -67,8 +66,6 @@ public class ScriptFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; // also, when caching, since its isCacheable is false, will result in loading all bit set... String script = null; String scriptLang; @@ -81,6 +78,8 @@ public class ScriptFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("params".equals(currentFieldName)) { params = parser.map(); @@ -90,10 +89,6 @@ public class ScriptFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if (!scriptParameterParser.token(currentFieldName, token, parser)){ throw new QueryParsingException(parseContext, "[script] filter does not support [" + currentFieldName + "]"); } @@ -115,9 +110,6 @@ public class ScriptFilterParser implements FilterParser { } Filter filter = new ScriptFilter(scriptLang, script, scriptType, params, parseContext.scriptService(), parseContext.lookup()); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { 
parseContext.addNamedFilter(filterName, filter); } @@ -150,7 +142,7 @@ public class ScriptFilterParser implements FilterParser { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; ScriptFilter that = (ScriptFilter) o; @@ -162,8 +154,9 @@ public class ScriptFilterParser implements FilterParser { @Override public int hashCode() { - int result = script != null ? script.hashCode() : 0; - result = 31 * result + (params != null ? params.hashCode() : 0); + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(script); + result = 31 * result + Objects.hashCode(params); return result; } diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java index 74349a00603..3ca5069127d 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java @@ -34,9 +34,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { private final Object value; - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -108,19 +105,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to true. 
- */ - public TermFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermFilterParser.NAME); @@ -128,12 +112,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java index ca077b91ee3..0224a6384da 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java @@ -21,12 +21,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -52,8 +50,6 @@ public class TermFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object value = null; @@ -63,6 +59,8 @@ public class 
TermFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // also support a format of "term" : {"field_name" : { ... }} fieldName = currentFieldName; @@ -76,10 +74,6 @@ public class TermFilterParser implements FilterParser { value = parser.objectBytes(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[term] filter does not support [" + currentFieldName + "]"); } @@ -88,10 +82,6 @@ public class TermFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = currentFieldName; value = parser.objectBytes(); @@ -113,11 +103,7 @@ public class TermFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().termFilter(value, parseContext); } if (filter == null) { - filter = Queries.wrap(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); } if (filterName != null) { @@ -125,4 +111,4 @@ public class TermFilterParser implements 
FilterParser { } return filter; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java index a6331fb51a6..d753235e341 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java @@ -32,9 +32,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { private final Object values; - private Boolean cache; - private String cacheKey; - private String filterName; private String execution; @@ -134,19 +131,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public TermsFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermsFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermsFilterParser.NAME); @@ -159,12 +143,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java index 46c52b80f64..ce8a8122665 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java @@ -23,14 +23,13 @@ import com.google.common.collect.Lists; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import 
org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -71,7 +70,6 @@ public class TermsFilterParser implements FilterParser { XContentParser parser = parseContext.parser(); MapperService.SmartNameFieldMappers smartNameFieldMappers; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); String filterName = null; String currentFieldName = null; @@ -81,13 +79,14 @@ public class TermsFilterParser implements FilterParser { String lookupPath = null; String lookupRouting = null; - HashedBytesRef cacheKey = null; XContentParser.Token token; List terms = Lists.newArrayList(); String fieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { if (fieldName != null) { throw new QueryParsingException(parseContext, "[terms] filter does not support multiple fields"); @@ -137,10 +136,6 @@ public class TermsFilterParser implements FilterParser { // ignore } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, 
"[terms] filter does not support [" + currentFieldName + "]"); } @@ -181,11 +176,7 @@ public class TermsFilterParser implements FilterParser { for (int i = 0; i < filterValues.length; i++) { filterValues[i] = BytesRefs.toBytesRef(terms.get(i)); } - filter = Queries.wrap(new TermsQuery(fieldName, filterValues)); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new TermsQuery(fieldName, filterValues)); } if (filterName != null) { diff --git a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java index 1c23c8f338c..1a9473ede40 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java @@ -36,8 +36,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { private String lookupPath; private Boolean lookupCache; - private Boolean cache; - private String cacheKey; private String filterName; public TermsLookupFilterBuilder(String name) { @@ -94,16 +92,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { return this; } - public TermsLookupFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermsLookupFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermsFilterParser.NAME); @@ -126,12 +114,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java 
b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index 095a849b792..6c1b0e45aaa 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -29,7 +30,6 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.index.search.child.TopChildrenQuery; @@ -130,11 +130,11 @@ public class TopChildrenQueryParser implements QueryParser { innerQuery.setBoost(boost); // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + innerQuery = new FilteredQuery(innerQuery, childDocMapper.typeFilter()); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); TopChildrenQuery query = new TopChildrenQuery(parentChildIndexFieldData, innerQuery, childType, parentType, scoreType, factor, incrementalFactor, nonNestedDocsFilter); if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedFilter(queryName, new QueryWrapperFilter(query)); } return query; } diff --git a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java 
b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java index a6248a4e228..6c9e9523e76 100644 --- a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java @@ -21,10 +21,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; @@ -68,10 +68,10 @@ public class TypeFilterParser implements FilterParser { //LUCENE 4 UPGRADE document mapper should use bytesref as well? DocumentMapper documentMapper = parseContext.mapperService().documentMapper(type.utf8ToString()); if (documentMapper == null) { - filter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, type))); + filter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, type))); } else { filter = documentMapper.typeFilter(); } - return parseContext.cacheFilter(filter, null, parseContext.autoFilterCachePolicy()); + return filter; } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java index 2b9363da61f..7b4faa3369e 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.XFilteredDocIdSetIterator; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Bits; import 
org.apache.lucene.util.LongBitSet; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; @@ -52,55 +53,41 @@ import java.util.Set; * */ // TODO: Remove me and move the logic to ChildrenQuery when needsScore=false -public class ChildrenConstantScoreQuery extends Query { +public class ChildrenConstantScoreQuery extends IndexCacheableQuery { private final IndexParentChildFieldData parentChildIndexFieldData; - private Query originalChildQuery; + private final Query childQuery; private final String parentType; private final String childType; private final Filter parentFilter; private final int shortCircuitParentDocSet; private final BitDocIdSetFilter nonNestedDocsFilter; - private Query rewrittenChildQuery; - private IndexReader rewriteIndexReader; - public ChildrenConstantScoreQuery(IndexParentChildFieldData parentChildIndexFieldData, Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, BitDocIdSetFilter nonNestedDocsFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; this.parentFilter = parentFilter; this.parentType = parentType; this.childType = childType; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.shortCircuitParentDocSet = shortCircuitParentDocSet; this.nonNestedDocsFilter = nonNestedDocsFilter; } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewrittenChildQuery = originalChildQuery.rewrite(reader); - rewriteIndexReader = reader; + final Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + ChildrenConstantScoreQuery rewritten = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childRewritten, parentType, childType, parentFilter, 
shortCircuitParentDocSet, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ChildrenConstantScoreQuery q = (ChildrenConstantScoreQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal(searcher.getIndexReader()); - assert rewrittenChildQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); final long valueCount; List leaves = searcher.getIndexReader().leaves(); @@ -116,9 +103,9 @@ public class ChildrenConstantScoreQuery extends Query { return new BooleanQuery().createWeight(searcher, needsScores); } - Query childQuery = rewrittenChildQuery; IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); ParentOrdCollector collector = new ParentOrdCollector(globalIfd, valueCount, parentType); indexSearcher.search(childQuery, collector); @@ -141,12 +128,12 @@ public class ChildrenConstantScoreQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ChildrenConstantScoreQuery that = (ChildrenConstantScoreQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if 
(!childType.equals(that.childType)) { @@ -155,24 +142,21 @@ public class ChildrenConstantScoreQuery extends Query { if (shortCircuitParentDocSet != that.shortCircuitParentDocSet) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + childType.hashCode(); result = 31 * result + shortCircuitParentDocSet; - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { - return "child_filter[" + childType + "/" + parentType + "](" + originalChildQuery + ')'; + return "child_filter[" + childType + "/" + parentType + "](" + childQuery + ')'; } private final class ParentWeight extends Weight { diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java index e253e1a848d..18f004f7133 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java @@ -40,6 +40,7 @@ import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.common.util.BigArrays; @@ -63,28 +64,25 @@ import java.util.Set; * all parent documents having the same uid value that is collected in the first phase are emitted as hit including * a score based on the aggregated child scores and score type. 
*/ -public class ChildrenQuery extends Query { +public final class ChildrenQuery extends IndexCacheableQuery { protected final ParentChildIndexFieldData ifd; protected final String parentType; protected final String childType; protected final Filter parentFilter; protected final ScoreType scoreType; - protected Query originalChildQuery; + protected Query childQuery; protected final int minChildren; protected final int maxChildren; protected final int shortCircuitParentDocSet; protected final BitDocIdSetFilter nonNestedDocsFilter; - protected Query rewrittenChildQuery; - protected IndexReader rewriteIndexReader; - public ChildrenQuery(ParentChildIndexFieldData ifd, String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int minChildren, int maxChildren, int shortCircuitParentDocSet, BitDocIdSetFilter nonNestedDocsFilter) { this.ifd = ifd; this.parentType = parentType; this.childType = childType; this.parentFilter = parentFilter; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.scoreType = scoreType; this.shortCircuitParentDocSet = shortCircuitParentDocSet; this.nonNestedDocsFilter = nonNestedDocsFilter; @@ -93,25 +91,33 @@ public class ChildrenQuery extends Query { this.maxChildren = maxChildren; } + @Override + public Query rewrite(IndexReader reader) throws IOException { + final Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + Query rewritten = new ChildrenQuery(ifd, parentType, childType, parentFilter, childRewritten, scoreType, minChildren, maxChildren, shortCircuitParentDocSet, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; + } + return super.rewrite(reader); + } + @Override public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ChildrenQuery that = (ChildrenQuery) obj; - if 
(!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } if (minChildren != that.minChildren) { return false; } @@ -123,9 +129,9 @@ public class ChildrenQuery extends Query { @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + childType.hashCode(); - result = 31 * result + Float.floatToIntBits(getBoost()); result = 31 * result + minChildren; result = 31 * result + maxChildren; return result; @@ -135,36 +141,12 @@ public class ChildrenQuery extends Query { public String toString(String field) { int max = maxChildren == 0 ? Integer.MAX_VALUE : maxChildren; return "ChildrenQuery[min(" + Integer.toString(minChildren) + ") max(" + Integer.toString(max) + ")of " + childType + "/" - + parentType + "](" + originalChildQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); + + parentType + "](" + childQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); } @Override - // See TopChildrenQuery#rewrite - public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewriteIndexReader = reader; - rewrittenChildQuery = originalChildQuery.rewrite(reader); - } - return this; - } - - @Override - public Query clone() { - ChildrenQuery q = (ChildrenQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); - assert rewrittenChildQuery != null; - assert 
rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader - + " searcher.getIndexReader()=" + searcher.getIndexReader(); - final Query childQuery = rewrittenChildQuery; IndexParentChildFieldData globalIfd = ifd.loadGlobal(searcher.getIndexReader()); if (globalIfd == null) { @@ -173,6 +155,7 @@ public class ChildrenQuery extends Query { } IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); boolean abort = true; long numFoundParents; @@ -230,7 +213,7 @@ public class ChildrenQuery extends Query { } else { parentFilter = this.parentFilter; } - return new ParentWeight(this, rewrittenChildQuery.createWeight(searcher, needsScores), parentFilter, numFoundParents, collector, minChildren, + return new ParentWeight(this, childQuery.createWeight(searcher, needsScores), parentFilter, numFoundParents, collector, minChildren, maxChildren); } diff --git a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java b/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java deleted file mode 100644 index 0adfff07e43..00000000000 --- a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.search.child; - -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.*; -import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContext.Lifetime; - -import java.io.IOException; -import java.util.IdentityHashMap; - -/** - * Forked from {@link QueryWrapperFilter} to make sure the weight is only created once. - * This filter should never be cached! This filter only exists for internal usage. - * - * @elasticsearch.internal - */ -public class CustomQueryWrappingFilter extends NoCacheFilter implements Releasable { - - private final Query query; - - private IndexSearcher searcher; - private IdentityHashMap docIdSets; - - /** Constructs a filter which only matches documents matching - * query. 
- */ - public CustomQueryWrappingFilter(Query query) { - if (query == null) - throw new NullPointerException("Query may not be null"); - this.query = query; - } - - /** returns the inner Query */ - public final Query getQuery() { - return query; - } - - @Override - public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException { - final SearchContext searchContext = SearchContext.current(); - if (docIdSets == null) { - assert searcher == null; - IndexSearcher searcher = searchContext.searcher(); - docIdSets = new IdentityHashMap<>(); - this.searcher = searcher; - searchContext.addReleasable(this, Lifetime.COLLECTION); - - final Weight weight = searcher.createNormalizedWeight(query, false); - for (final LeafReaderContext leaf : searcher.getTopReaderContext().leaves()) { - final DocIdSet set = new DocIdSet() { - @Override - public DocIdSetIterator iterator() throws IOException { - return weight.scorer(leaf, null); - } - @Override - public boolean isCacheable() { return false; } - - @Override - public long ramBytesUsed() { - return 0; - } - }; - docIdSets.put(leaf.reader(), set); - } - } else { - assert searcher == SearchContext.current().searcher(); - } - final DocIdSet set = docIdSets.get(context.reader()); - return BitsFilteredDocIdSet.wrap(set, acceptDocs); - } - - @Override - public void close() { - // We need to clear the docIdSets, otherwise this is leaved unused - // DocIdSets around and can potentially become a memory leak. 
- docIdSets = null; - searcher = null; - } - - @Override - public String toString(String field) { - return "CustomQueryWrappingFilter(" + query + ")"; - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } - if (o != null && o instanceof CustomQueryWrappingFilter && - this.query.equals(((CustomQueryWrappingFilter)o).query)) { - return true; - } - - return false; - } - - @Override - public int hashCode() { - return query.hashCode() ^ 0x823D64C9; - } - - /** @return Whether {@link CustomQueryWrappingFilter} should be used. */ - public static boolean shouldUseCustomQueryWrappingFilter(Query query) { - if (query instanceof TopChildrenQuery || query instanceof ChildrenConstantScoreQuery - || query instanceof ChildrenQuery || query instanceof ParentConstantScoreQuery - || query instanceof ParentQuery) { - return true; - } else { - return false; - } - } -} diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java index ce1be7e25d3..5d2d1101ff7 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; @@ -47,48 +48,34 @@ import java.util.Set; /** * A query that only return child documents that are linked to the parent documents that matched with the inner query. 
*/ -public class ParentConstantScoreQuery extends Query { +public class ParentConstantScoreQuery extends IndexCacheableQuery { private final ParentChildIndexFieldData parentChildIndexFieldData; - private Query originalParentQuery; + private Query parentQuery; private final String parentType; private final Filter childrenFilter; - private Query rewrittenParentQuery; - private IndexReader rewriteIndexReader; - public ParentConstantScoreQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalParentQuery = parentQuery; + this.parentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenParentQuery == null) { - rewrittenParentQuery = originalParentQuery.rewrite(reader); - rewriteIndexReader = reader; + Query parentRewritten = parentQuery.rewrite(reader); + if (parentRewritten != parentQuery) { + Query rewritten = new ParentConstantScoreQuery(parentChildIndexFieldData, parentRewritten, parentType, childrenFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ParentConstantScoreQuery q = (ParentConstantScoreQuery) super.clone(); - q.originalParentQuery = originalParentQuery.clone(); - if (q.rewrittenParentQuery != null) { - q.rewrittenParentQuery = rewrittenParentQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal(searcher.getIndexReader()); - assert rewrittenParentQuery != null; - assert rewriteIndexReader == 
searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); final long maxOrd; List leaves = searcher.getIndexReader().leaves(); @@ -104,10 +91,10 @@ public class ParentConstantScoreQuery extends Query { return new BooleanQuery().createWeight(searcher, needsScores); } - final Query parentQuery = rewrittenParentQuery; ParentOrdsCollector collector = new ParentOrdsCollector(globalIfd, maxOrd, parentType); IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); indexSearcher.search(parentQuery, collector); if (collector.parentCount() == 0) { @@ -119,9 +106,9 @@ public class ParentConstantScoreQuery extends Query { @Override public int hashCode() { - int result = originalParentQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + parentQuery.hashCode(); result = 31 * result + parentType.hashCode(); - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @@ -130,26 +117,23 @@ public class ParentConstantScoreQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ParentConstantScoreQuery that = (ParentConstantScoreQuery) obj; - if (!originalParentQuery.equals(that.originalParentQuery)) { + if (!parentQuery.equals(that.parentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } - if (this.getBoost() != that.getBoost()) { - return false; - } return true; } @Override public String toString(String field) { - return "parent_filter[" + parentType + "](" + originalParentQuery + ')'; + return "parent_filter[" + parentType + "](" + parentQuery + ')'; } private final class ChildrenWeight extends Weight { diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java 
b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java index cf16d78af77..cc34da404bb 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; @@ -40,7 +41,6 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LongBitSet; import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.index.mapper.Uid; @@ -66,9 +66,9 @@ final class ParentIdsFilter extends Filter { BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))), Occur.MUST); bq.add(nonNestedDocsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); + return new QueryWrapperFilter(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); } } else { BytesRefHash parentIds= null; @@ -98,9 +98,9 @@ final class ParentIdsFilter extends Filter { BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))), Occur.MUST); bq.add(nonNestedDocsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(new 
TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); + return new QueryWrapperFilter(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); } } else { BytesRefHash parentIds = null; diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java index 388f5e46a35..ec3ed4862e8 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java @@ -24,19 +24,21 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.ToStringUtils; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.common.util.BigArrays; @@ -55,19 +57,16 @@ import java.util.Set; * connects the matching parent docs to the related child documents * using the {@link ParentChildIndexFieldData}. 
*/ -public class ParentQuery extends Query { +public class ParentQuery extends IndexCacheableQuery { private final ParentChildIndexFieldData parentChildIndexFieldData; - private Query originalParentQuery; + private Query parentQuery; private final String parentType; private final Filter childrenFilter; - private Query rewrittenParentQuery; - private IndexReader rewriteIndexReader; - public ParentQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalParentQuery = parentQuery; + this.parentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @@ -77,26 +76,24 @@ public class ParentQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ParentQuery that = (ParentQuery) obj; - if (!originalParentQuery.equals(that.originalParentQuery)) { + if (!parentQuery.equals(that.parentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalParentQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + parentQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + Float.floatToIntBits(getBoost()); return result; @@ -104,31 +101,22 @@ public class ParentQuery extends Query { @Override public String toString(String field) { - return "ParentQuery[" + parentType + "](" + originalParentQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); + return "ParentQuery[" + parentType + "](" + parentQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenParentQuery == 
null) { - rewriteIndexReader = reader; - rewrittenParentQuery = originalParentQuery.rewrite(reader); + Query parentRewritten = parentQuery.rewrite(reader); + if (parentRewritten != parentQuery) { + Query rewritten = new ParentQuery(parentChildIndexFieldData, parentRewritten, parentType, childrenFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ParentQuery q = (ParentQuery) super.clone(); - q.originalParentQuery = originalParentQuery.clone(); - if (q.rewrittenParentQuery != null) { - q.rewrittenParentQuery = rewrittenParentQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); ChildWeight childWeight; boolean releaseCollectorResource = true; @@ -140,12 +128,10 @@ public class ParentQuery extends Query { } try { - assert rewrittenParentQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); - final Query parentQuery = rewrittenParentQuery; collector = new ParentOrdAndScoreCollector(sc, globalIfd, parentType); IndexSearcher indexSearcher = new IndexSearcher(sc.searcher().getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); indexSearcher.search(parentQuery, collector); if (collector.parentCount() == 0) { return new BooleanQuery().createWeight(searcher, needsScores); @@ -162,7 +148,7 @@ public class ParentQuery extends Query { return childWeight; } - private static class ParentOrdAndScoreCollector extends NoopCollector implements Releasable { + private static class ParentOrdAndScoreCollector implements Collector, Releasable { private final LongHash 
parentIdxs; private FloatArray scores; @@ -170,9 +156,6 @@ public class ParentQuery extends Query { private final BigArrays bigArrays; private final String parentType; - private Scorer scorer; - private SortedDocValues values; - ParentOrdAndScoreCollector(SearchContext searchContext, IndexParentChildFieldData globalIfd, String parentType) { this.bigArrays = searchContext.bigArrays(); this.parentIdxs = new LongHash(512, bigArrays); @@ -182,30 +165,36 @@ public class ParentQuery extends Query { } @Override - public void collect(int doc) throws IOException { - // It can happen that for particular segment no document exist for an specific type. This prevents NPE - if (values != null) { - long globalOrdinal = values.getOrd(doc); - if (globalOrdinal != SortedSetDocValues.NO_MORE_ORDS) { - long parentIdx = parentIdxs.add(globalOrdinal); - if (parentIdx >= 0) { - scores = bigArrays.grow(scores, parentIdx + 1); - scores.set(parentIdx, scorer.score()); - } else { - assert false : "parent id should only match once, since there can only be one parent doc"; + public boolean needsScores() { + return true; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final SortedDocValues values = globalIfd.load(context).getOrdinalsValues(parentType); + if (values == null) { + return NoopCollector.NOOP_COLLECTOR; + } + return new LeafCollector() { + Scorer scorer; + @Override + public void setScorer(Scorer scorer) throws IOException { + this.scorer = scorer; + } + @Override + public void collect(int doc) throws IOException { + long globalOrdinal = values.getOrd(doc); + if (globalOrdinal != SortedSetDocValues.NO_MORE_ORDS) { + long parentIdx = parentIdxs.add(globalOrdinal); + if (parentIdx >= 0) { + scores = bigArrays.grow(scores, parentIdx + 1); + scores.set(parentIdx, scorer.score()); + } else { + assert false : "parent id should only match once, since there can only be one parent doc"; + } } } - } - } - - @Override - public void 
setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - values = globalIfd.load(context).getOrdinalsValues(parentType); + }; } @Override @@ -262,12 +251,16 @@ public class ParentQuery extends Query { if (DocIdSets.isEmpty(childrenDocSet)) { return null; } + final DocIdSetIterator childIterator = childrenDocSet.iterator(); + if (childIterator == null) { + return null; + } SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType); if (bytesValues == null) { return null; } - return new ChildScorer(this, parentIdxs, scores, childrenDocSet.iterator(), bytesValues); + return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues); } } diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java index 0529350863f..4fc233b21b9 100644 --- a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.search.EmptyScorer; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; @@ -53,7 +54,7 @@ import java.util.Set; * This query is most of the times faster than the {@link ChildrenQuery}. Usually enough parent documents can be returned * in the first child document query round. 
*/ -public class TopChildrenQuery extends Query { +public class TopChildrenQuery extends IndexCacheableQuery { private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator(); @@ -63,17 +64,13 @@ public class TopChildrenQuery extends Query { private final ScoreType scoreType; private final int factor; private final int incrementalFactor; - private Query originalChildQuery; + private Query childQuery; private final BitDocIdSetFilter nonNestedDocsFilter; - // This field will hold the rewritten form of originalChildQuery, so that we can reuse it - private Query rewrittenChildQuery; - private IndexReader rewriteIndexReader; - // Note, the query is expected to already be filtered to only child type docs public TopChildrenQuery(IndexParentChildFieldData parentChildIndexFieldData, Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, BitDocIdSetFilter nonNestedDocsFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.childType = childType; this.parentType = parentType; this.scoreType = scoreType; @@ -82,35 +79,19 @@ public class TopChildrenQuery extends Query { this.nonNestedDocsFilter = nonNestedDocsFilter; } - // Rewrite invocation logic: - // 1) query_then|and_fetch (default): Rewrite is execute as part of the createWeight invocation, when search child docs. - // 2) dfs_query_then|and_fetch:: First rewrite and then createWeight is executed. During query phase rewrite isn't - // executed any more because searchContext#queryRewritten() returns true. 
@Override public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewrittenChildQuery = originalChildQuery.rewrite(reader); - rewriteIndexReader = reader; + Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + Query rewritten = new TopChildrenQuery(parentChildIndexFieldData, childRewritten, childType, parentType, scoreType, factor, incrementalFactor, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - // We can always return the current instance, and we can do this b/c the child query is executed separately - // before the main query (other scope) in a different IS#search() invocation than the main query. - // In fact we only need override the rewrite method because for the dfs phase, to get also global document - // frequency for the child query. - return this; + return super.rewrite(reader); } @Override - public Query clone() { - TopChildrenQuery q = (TopChildrenQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { ObjectObjectOpenHashMap parentDocs = new ObjectObjectOpenHashMap<>(); SearchContext searchContext = SearchContext.current(); @@ -121,16 +102,9 @@ public class TopChildrenQuery extends Query { } int numChildDocs = requestedDocs * factor; - Query childQuery; - if (rewrittenChildQuery == null) { - childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery); - } else { - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); - childQuery = rewrittenChildQuery; - } - IndexSearcher 
indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); while (true) { parentDocs.clear(); TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs); @@ -155,7 +129,7 @@ public class TopChildrenQuery extends Query { } } - ParentWeight parentWeight = new ParentWeight(this, rewrittenChildQuery.createWeight(searcher, needsScores), parentDocs); + ParentWeight parentWeight = new ParentWeight(this, childQuery.createWeight(searcher, needsScores), parentDocs); searchContext.addReleasable(parentWeight, Lifetime.COLLECTION); return parentWeight; } @@ -251,12 +225,12 @@ public class TopChildrenQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } TopChildrenQuery that = (TopChildrenQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { @@ -265,25 +239,22 @@ public class TopChildrenQuery extends Query { if (incrementalFactor != that.incrementalFactor) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + incrementalFactor; - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { StringBuilder sb = new StringBuilder(); - sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery.toString(field)).append(')'); + sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(childQuery.toString(field)).append(')'); 
sb.append(ToStringUtils.boost(getBoost())); return sb.toString(); } diff --git a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java index f8020f4b95b..880b1f54254 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java @@ -22,8 +22,8 @@ package org.elasticsearch.index.search.geo; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; /** @@ -48,13 +48,13 @@ public class IndexedGeoBoundingBoxFilter { filter.add(fieldMapper.lonMapper().rangeFilter(null, bottomRight.lon(), true, true), Occur.SHOULD); filter.add(fieldMapper.lonMapper().rangeFilter(topLeft.lon(), null, true, true), Occur.SHOULD); filter.add(fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } private static Filter eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) { BooleanQuery filter = new BooleanQuery(); filter.add(fieldMapper.lonMapper().rangeFilter(topLeft.lon(), bottomRight.lon(), true, true), Occur.MUST); filter.add(fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d8e37e40c2e..2f3c0371e15 100644 --- 
a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -555,7 +555,7 @@ public class IndexShard extends AbstractIndexShardComponent { Query query = queryParserService.parseQuery(source).query(); Filter searchFilter = mapperService.searchFilter(types); if (searchFilter != null) { - query = new FilteredQuery(query, indexCache.filter().cache(searchFilter, null, queryParserService.autoFilterCachePolicy())); + query = new FilteredQuery(query, searchFilter); } Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases); @@ -753,7 +753,7 @@ public class IndexShard extends AbstractIndexShardComponent { engine.flushAndClose(); } } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times - IOUtils.close(engine); + IOUtils.close(engine, shardFilterCache); } } } @@ -1291,6 +1291,6 @@ public class IndexShard extends AbstractIndexShardComponent { }; return new EngineConfig(shardId, threadPool, indexingService, indexSettingsService, warmer, store, deletionPolicy, translog, mergePolicyProvider, mergeScheduler, - mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer); + mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy()); } } diff --git a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index ee0ff0c81c6..ddc04403e7f 100644 --- a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices; import com.google.common.collect.Lists; import com.google.common.collect.Maps; + import org.elasticsearch.action.admin.indices.stats.CommonStats; import 
org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java index 7447eea4d89..0ee4f07c7e2 100644 --- a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java +++ b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java @@ -19,212 +19,290 @@ package org.elasticsearch.indices.cache.filter; -import com.carrotsearch.hppc.ObjectOpenHashSet; -import com.google.common.base.Objects; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.LRUQueryCache; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; -import org.elasticsearch.node.settings.NodeSettingsService; -import 
org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.index.cache.filter.FilterCacheStats; +import org.elasticsearch.index.shard.ShardId; -import java.util.Iterator; +import java.io.Closeable; +import java.io.IOException; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.ConcurrentHashMap; -public class IndicesFilterCache extends AbstractComponent implements RemovalListener { +public class IndicesFilterCache extends AbstractComponent implements QueryCache, Closeable { - private final ThreadPool threadPool; + public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size"; + public static final String INDICES_CACHE_QUERY_COUNT = "indices.cache.filter.count"; - private Cache cache; + private final LRUQueryCache cache; + private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); + private final Map shardStats = new ConcurrentHashMap<>(); + private volatile long sharedRamBytesUsed; - private volatile String size; - private volatile long sizeInBytes; - private volatile TimeValue expire; - private volatile int concurrencyLevel; - - private final TimeValue cleanInterval; - private final int minimumEntryWeight; - - private final Set readersKeysToClean = ConcurrentCollections.newConcurrentSet(); - - private volatile boolean closed; - - public static final String INDICES_CACHE_FILTER_SIZE = "indices.cache.filter.size"; - public static final String INDICES_CACHE_FILTER_EXPIRE = "indices.cache.filter.expire"; - public static final String INDICES_CACHE_FILTER_CONCURRENCY_LEVEL = "indices.cache.filter.concurrency_level"; - public static final String INDICES_CACHE_FILTER_CLEAN_INTERVAL = "indices.cache.filter.clean_interval"; - public static final String INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT = "indices.cache.filter.minimum_entry_weight"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override 
- public void onRefreshSettings(Settings settings) { - boolean replace = false; - String size = settings.get(INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size); - if (!size.equals(IndicesFilterCache.this.size)) { - logger.info("updating [{}] from [{}] to [{}]", - INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size, size); - IndicesFilterCache.this.size = size; - replace = true; - } - TimeValue expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire); - if (!Objects.equal(expire, IndicesFilterCache.this.expire)) { - logger.info("updating [{}] from [{}] to [{}]", - INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire, expire); - IndicesFilterCache.this.expire = expire; - replace = true; - } - final int concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel); - if (concurrencyLevel <= 0) { - throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); - } - if (!Objects.equal(concurrencyLevel, IndicesFilterCache.this.concurrencyLevel)) { - logger.info("updating [{}] from [{}] to [{}]", - INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel, concurrencyLevel); - IndicesFilterCache.this.concurrencyLevel = concurrencyLevel; - replace = true; - } - if (replace) { - Cache oldCache = IndicesFilterCache.this.cache; - computeSizeInBytes(); - buildCache(); - oldCache.invalidateAll(); - } - } - } + // This is a hack for the fact that the close listener for the + // ShardCoreKeyMap will be called before onDocIdSetEviction + // See onDocIdSetEviction for more info + private final Map stats2 = new IdentityHashMap<>(); @Inject - public IndicesFilterCache(Settings settings, ThreadPool threadPool, NodeSettingsService nodeSettingsService) { + public IndicesFilterCache(Settings settings) { super(settings); - this.threadPool = threadPool; - this.size = settings.get(INDICES_CACHE_FILTER_SIZE, "10%"); - this.expire 
= settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, null); - this.minimumEntryWeight = settings.getAsInt(INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT, 1024); // 1k per entry minimum - if (minimumEntryWeight <= 0) { - throw new IllegalArgumentException("minimum_entry_weight must be > 0 but was: " + minimumEntryWeight); + final String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE, "10%"); + final ByteSizeValue size = MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString); + final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 100000); + logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], max filter count [{}]", + sizeString, size, count); + cache = new LRUQueryCache(count, size.bytes()) { + + private Stats getStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + if (shardId == null) { + return null; + } + return shardStats.get(shardId); + } + + private Stats getOrCreateStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + Stats stats = shardStats.get(shardId); + if (stats == null) { + stats = new Stats(); + shardStats.put(shardId, stats); + } + return stats; + } + + // It's ok to not protect these callbacks by a lock since it is + // done in LRUQueryCache + @Override + protected void onClear() { + assert Thread.holdsLock(this); + super.onClear(); + for (Stats stats : shardStats.values()) { + // don't throw away hit/miss + stats.cacheSize = 0; + stats.ramBytesUsed = 0; + } + sharedRamBytesUsed = 0; + } + + @Override + protected void onQueryCache(Query filter, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onQueryCache(filter, ramBytesUsed); + sharedRamBytesUsed += ramBytesUsed; + } + + @Override + protected void onQueryEviction(Query filter, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onQueryEviction(filter, ramBytesUsed); + sharedRamBytesUsed -= ramBytesUsed; + } + + @Override + protected void onDocIdSetCache(Object 
readerCoreKey, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onDocIdSetCache(readerCoreKey, ramBytesUsed); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.cacheSize += 1; + shardStats.cacheCount += 1; + shardStats.ramBytesUsed += ramBytesUsed; + + StatsAndCount statsAndCount = stats2.get(readerCoreKey); + if (statsAndCount == null) { + statsAndCount = new StatsAndCount(shardStats); + stats2.put(readerCoreKey, statsAndCount); + } + statsAndCount.count += 1; + } + + @Override + protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) { + assert Thread.holdsLock(this); + super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed); + // We can't use ShardCoreKeyMap here because its core closed + // listener is called before the listener of the cache which + // triggers this eviction. So instead we use use stats2 that + // we only evict when nothing is cached anymore on the segment + // instead of relying on close listeners + final StatsAndCount statsAndCount = stats2.get(readerCoreKey); + final Stats shardStats = statsAndCount.stats; + shardStats.cacheSize -= numEntries; + shardStats.ramBytesUsed -= sumRamBytesUsed; + statsAndCount.count -= numEntries; + if (statsAndCount.count == 0) { + stats2.remove(readerCoreKey); + } + } + + @Override + protected void onHit(Object readerCoreKey, Query filter) { + assert Thread.holdsLock(this); + super.onHit(readerCoreKey, filter); + final Stats shardStats = getStats(readerCoreKey); + shardStats.hitCount += 1; + } + + @Override + protected void onMiss(Object readerCoreKey, Query filter) { + assert Thread.holdsLock(this); + super.onMiss(readerCoreKey, filter); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.missCount += 1; + } + }; + sharedRamBytesUsed = 0; + } + + /** Get usage statistics for the given shard. 
*/ + public FilterCacheStats getStats(ShardId shard) { + final Map stats = new HashMap<>(); + for (Map.Entry entry : shardStats.entrySet()) { + stats.put(entry.getKey(), entry.getValue().toQueryCacheStats()); } - this.cleanInterval = settings.getAsTime(INDICES_CACHE_FILTER_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60)); - // defaults to 4, but this is a busy map for all indices, increase it a bit - this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, 16); - if (concurrencyLevel <= 0) { - throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + FilterCacheStats shardStats = new FilterCacheStats(); + FilterCacheStats info = stats.get(shard); + if (info == null) { + info = new FilterCacheStats(); } - computeSizeInBytes(); - buildCache(); - logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], expire [{}], clean_interval [{}]", - size, new ByteSizeValue(sizeInBytes), expire, cleanInterval); + shardStats.add(info); - nodeSettingsService.addListener(new ApplySettings()); - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, new ReaderCleaner()); - } - - private void buildCache() { - CacheBuilder cacheBuilder = CacheBuilder.newBuilder() - .removalListener(this) - .maximumWeight(sizeInBytes).weigher(new WeightedFilterCache.FilterCacheValueWeigher(minimumEntryWeight)); - - cacheBuilder.concurrencyLevel(this.concurrencyLevel); - - if (expire != null) { - cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); + // We also have some shared ram usage that we try to distribute to + // proportionally to their number of cache entries of each shard + long totalSize = 0; + for (FilterCacheStats s : stats.values()) { + totalSize += s.getCacheSize(); } - - cache = cacheBuilder.build(); - } - - private void computeSizeInBytes() { - this.sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes(); - } - - public void addReaderKeyToClean(Object 
readerKey) { - readersKeysToClean.add(readerKey); - } - - public void close() { - closed = true; - cache.invalidateAll(); - } - - public Cache cache() { - return this.cache; + final double weight = totalSize == 0 + ? 1d / stats.size() + : shardStats.getCacheSize() / totalSize; + final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed); + shardStats.add(new FilterCacheStats(additionalRamBytesUsed, 0, 0, 0, 0)); + return shardStats; } @Override - public void onRemoval(RemovalNotification removalNotification) { - WeightedFilterCache.FilterCacheKey key = removalNotification.getKey(); - if (key == null) { - return; - } - if (key.removalListener != null) { - key.removalListener.onRemoval(removalNotification); + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + while (weight instanceof CachingWeightWrapper) { + weight = ((CachingWeightWrapper) weight).in; } + final Weight in = cache.doCache(weight, policy); + // We wrap the weight to track the readers it sees and map them with + // the shards they belong to + return new CachingWeightWrapper(in); } - /** - * The reason we need this class is because we need to clean all the filters that are associated - * with a reader. We don't want to do it every time a reader closes, since iterating over all the map - * is expensive. There doesn't seem to be a nicer way to do it (and maintaining a list per reader - * of the filters will cost more). 
- */ - class ReaderCleaner implements Runnable { + private class CachingWeightWrapper extends Weight { - // this is thread safe since we only schedule the next cleanup once the current one is - // done, so no concurrent execution - private final ObjectOpenHashSet keys = ObjectOpenHashSet.newInstance(); + private final Weight in; + + protected CachingWeightWrapper(Weight in) { + super(in.getQuery()); + this.in = in; + } @Override - public void run() { - if (closed) { - return; - } - if (readersKeysToClean.isEmpty()) { - schedule(); - return; - } - try { - threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() { - @Override - public void run() { - keys.clear(); - for (Iterator it = readersKeysToClean.iterator(); it.hasNext(); ) { - keys.add(it.next()); - it.remove(); - } - if (!keys.isEmpty()) { - for (Iterator it = cache.asMap().keySet().iterator(); it.hasNext(); ) { - WeightedFilterCache.FilterCacheKey filterCacheKey = it.next(); - if (keys.contains(filterCacheKey.readerKey())) { - // same as invalidate - it.remove(); - } - } - } - cache.cleanUp(); - schedule(); - keys.clear(); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run ReaderCleaner - execution rejected", ex); - } + public void extractTerms(Set terms) { + in.extractTerms(terms); } - private void schedule() { - try { - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not schedule ReaderCleaner - execution rejected", ex); - } + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + shardKeyMap.add(context.reader()); + return in.explain(context, doc); + } + + @Override + public float getValueForNormalization() throws IOException { + return in.getValueForNormalization(); + } + + @Override + public void normalize(float norm, float topLevelBoost) { + in.normalize(norm, topLevelBoost); + } + + @Override + public Scorer scorer(LeafReaderContext 
context, Bits acceptDocs) throws IOException { + shardKeyMap.add(context.reader()); + return in.scorer(context, acceptDocs); } } + + /** Clear all entries that belong to the given index. */ + public void clearIndex(String index) { + final Set coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index); + for (Object coreKey : coreCacheKeys) { + cache.clearCoreCacheKey(coreKey); + } + + // This cache stores two things: filters, and doc id sets. Calling + // clear only removes the doc id sets, but if we reach the situation + // that the cache does not contain any DocIdSet anymore, then it + // probably means that the user wanted to remove everything. + if (cache.getCacheSize() == 0) { + cache.clear(); + } + } + + @Override + public void close() { + assert shardKeyMap.size() == 0 : shardKeyMap.size(); + assert shardStats.isEmpty(); + assert stats2.isEmpty() : stats2; + cache.clear(); + } + + private static class Stats implements Cloneable { + + volatile long ramBytesUsed; + volatile long hitCount; + volatile long missCount; + volatile long cacheCount; + volatile long cacheSize; + + FilterCacheStats toQueryCacheStats() { + return new FilterCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); + } + } + + private static class StatsAndCount { + int count; + final Stats stats; + + StatsAndCount(Stats stats) { + this.stats = stats; + this.count = 0; + } + } + + private boolean empty(Stats stats) { + if (stats == null) { + return true; + } + return stats.cacheSize == 0 && stats.ramBytesUsed == 0; + } + + public void onClose(ShardId shardId) { + assert empty(shardStats.get(shardId)); + shardStats.remove(shardId); + } } diff --git a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java index 2f3063a484f..a074d975c04 100644 --- a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java +++ 
b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java @@ -71,7 +71,9 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex { try { MultiReader mReader = new MultiReader(memoryIndices, true); LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); - DocSearcher docSearcher = new DocSearcher(new IndexSearcher(slowReader), rootDocMemoryIndex); + final IndexSearcher slowSearcher = new IndexSearcher(slowReader); + slowSearcher.setQueryCache(null); + DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex); context.initialize(docSearcher, parsedDocument); } catch (IOException e) { throw new ElasticsearchException("Failed to create index for percolator with nested document ", e); diff --git a/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/src/main/java/org/elasticsearch/percolator/PercolateContext.java index b5dfc37bcd6..d752052e829 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -439,11 +439,6 @@ public class PercolateContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return indexService.cache().filter(); - } - @Override public BitsetFilterCache bitsetFilterCache() { return indexService.bitsetFilterCache(); diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 96e7f326e4c..1284edc4902 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; import 
org.apache.lucene.search.TopDocs; import org.apache.lucene.util.BytesRef; @@ -794,14 +795,13 @@ public class PercolatorService extends AbstractComponent { private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException { Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter(); - percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.queryParserService().autoFilterCachePolicy()); final Filter filter; if (context.aliasFilter() != null) { BooleanQuery booleanFilter = new BooleanQuery(); booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST); booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST); - filter = Queries.wrap(booleanFilter); + filter = new QueryWrapperFilter(booleanFilter); } else { filter = percolatorTypeFilter; } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 8bd49e39d63..54d9948537e 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -89,9 +89,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { if (Fields.FIELDS.match(entry.getKey())) { clearIndicesCacheRequest.fields(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.fields())); } - if (Fields.FILTER_KEYS.match(entry.getKey())) { - clearIndicesCacheRequest.filterKeys(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.filterKeys())); - } } return clearIndicesCacheRequest; @@ -103,7 +100,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { public static final ParseField ID = 
new ParseField("id", "id_cache"); public static final ParseField RECYCLER = new ParseField("recycler"); public static final ParseField FIELDS = new ParseField("fields"); - public static final ParseField FILTER_KEYS = new ParseField("filter_keys"); } } diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index 38f4e03a0f1..057b92a5912 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -52,6 +52,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -366,7 +367,9 @@ public class SearchService extends AbstractLifecycleComponent { final SearchContext context = findContext(request.id()); contextProcessing(context); try { - context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity())); + final IndexCache indexCache = context.indexShard().indexService().cache(); + context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), + indexCache.filter(), indexCache.filterPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); @@ -436,7 +439,9 @@ public class SearchService extends AbstractLifecycleComponent { final SearchContext context = findContext(request.id()); contextProcessing(context); try { - context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity())); + final IndexCache indexCache = 
context.indexShard().indexService().cache(); + context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), + indexCache.filter(), indexCache.filterPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index abc0827a3cc..717834b045c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -20,8 +20,8 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableMap; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; @@ -119,9 +119,12 @@ public class AggregationPhase implements SearchPhase { if (!globals.isEmpty()) { BucketCollector globalsCollector = BucketCollector.wrap(globals); Query query = Queries.newMatchAllQuery(); - Filter searchFilter = context.searchFilter(context.types()); + Query searchFilter = context.searchFilter(context.types()); if (searchFilter != null) { - query = new FilteredQuery(query, searchFilter); + BooleanQuery filtered = new BooleanQuery(); + filtered.add(query, Occur.MUST); + filtered.add(searchFilter, Occur.FILTER); + query = filtered; } try { globalsCollector.preCollection(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java index aacd76b0b5b..2f50bbf69ee 100644 --- 
a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java @@ -84,8 +84,8 @@ public class ChildrenParser implements Aggregator.Parser { parentType = parentFieldMapper.type(); DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType); if (parentDocMapper != null) { - parentFilter = context.filterCache().cache(parentDocMapper.typeFilter(), null, context.queryParserService().autoFilterCachePolicy()); - childFilter = context.filterCache().cache(childDocMapper.typeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + parentFilter = parentDocMapper.typeFilter(); + childFilter = childDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = context.fieldData().getForField(parentFieldMapper); config.fieldContext(new FieldContext(parentFieldMapper.names().indexName(), parentChildIndexFieldData, parentFieldMapper)); } else { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 3775ab853a2..e456e93c8a1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -20,16 +20,21 @@ package org.elasticsearch.search.aggregations.bucket.children; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lease.Releasables; import 
org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.index.search.child.ConstantScorer; -import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -38,15 +43,19 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; // The RecordingPerReaderBucketCollector assumes per segment recording which isn't the case for this // aggregation, for this reason that collector can't be used public class ParentToChildrenAggregator extends SingleBucketAggregator { private final String parentType; - private final Filter childFilter; - private final Filter parentFilter; + private final Weight childFilter; + private final Weight parentFilter; private final ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource; // Maybe use PagedGrowableWriter? This will be less wasteful than LongArray, but then we don't have the reuse feature of BigArrays. 
@@ -69,8 +78,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { super(name, factories, aggregationContext, parent, reducers, metaData); this.parentType = parentType; // these two filters are cached in the parser - this.childFilter = childFilter; - this.parentFilter = parentFilter; + this.childFilter = aggregationContext.searchContext().searcher().createNormalizedWeight(childFilter, false); + this.parentFilter = aggregationContext.searchContext().searcher().createNormalizedWeight(parentFilter, false); this.parentOrdToBuckets = aggregationContext.bigArrays().newLongArray(maxOrd, false); this.parentOrdToBuckets.fill(0, maxOrd, -1); this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(aggregationContext.bigArrays()); @@ -100,13 +109,9 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert globalOrdinals != null; - DocIdSet parentDocIdSet = parentFilter.getDocIdSet(ctx, null); - // The DocIdSets.toSafeBits(...) can convert to FixedBitSet, but this - // will only happen if the none filter cache is used. (which only happens in tests) - // Otherwise the filter cache will produce a bitset based filter. 
- final Bits parentDocs = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), parentDocIdSet); - DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, null); - if (DocIdSets.isEmpty(childDocIdSet) == false) { + Scorer parentScorer = parentFilter.scorer(ctx, null); + final Bits parentDocs = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); + if (childFilter.scorer(ctx, null) != null) { replay.add(ctx); } return new LeafBucketCollector() { @@ -141,18 +146,14 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { this.replay = null; for (LeafReaderContext ctx : replay) { - final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); - - final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); - DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, ctx.reader().getLiveDocs()); - if (childDocIdSet == null) { - continue; - } - DocIdSetIterator childDocsIter = childDocIdSet.iterator(); + DocIdSetIterator childDocsIter = childFilter.scorer(ctx, ctx.reader().getLiveDocs()); if (childDocsIter == null) { continue; } + final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); + final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); + // Set the scorer, since we now replay only the child docIds sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f)); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index 6459ff83215..ed974279133 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.apache.lucene.index.LeafReaderContext; import 
org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -42,24 +44,23 @@ import java.util.Map; */ public class FilterAggregator extends SingleBucketAggregator { - private final Filter filter; + private final Weight filter; public FilterAggregator(String name, - org.apache.lucene.search.Filter filter, + Query filter, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); - this.filter = filter; + this.filter = aggregationContext.searchContext().searcher().createNormalizedWeight(filter, false); } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - // TODO: use the iterator if the filter does not support random access // no need to provide deleted docs to the filter - final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.getDocIdSet(ctx, null)); + final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null)); return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index 913d844cb6a..267833a8d95 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -22,7 +22,8 @@ package org.elasticsearch.search.aggregations.bucket.filters; import 
com.google.common.collect.Lists; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -49,15 +50,16 @@ public class FiltersAggregator extends BucketsAggregator { static class KeyedFilter { final String key; - final Filter filter; + final Query filter; - KeyedFilter(String key, Filter filter) { + KeyedFilter(String key, Query filter) { this.key = key; this.filter = filter; } } - private final KeyedFilter[] filters; + private final String[] keys; + private final Weight[] filters; private final boolean keyed; public FiltersAggregator(String name, AggregatorFactories factories, List filters, boolean keyed, AggregationContext aggregationContext, @@ -65,18 +67,23 @@ public class FiltersAggregator extends BucketsAggregator { throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.keyed = keyed; - this.filters = filters.toArray(new KeyedFilter[filters.size()]); + this.keys = new String[filters.size()]; + this.filters = new Weight[filters.size()]; + for (int i = 0; i < filters.size(); ++i) { + KeyedFilter keyedFilter = filters.get(i); + this.keys[i] = keyedFilter.key; + this.filters[i] = aggregationContext.searchContext().searcher().createNormalizedWeight(keyedFilter.filter, false); + } } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - // TODO: use the iterator if the filter does not support random access // no need to provide deleted docs to the filter final Bits[] bits = new Bits[filters.length]; for (int i = 0; i < filters.length; ++i) { - bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].filter.getDocIdSet(ctx, null)); - } + bits[i] = 
DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx, null)); + } return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { @@ -92,10 +99,9 @@ public class FiltersAggregator extends BucketsAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { List buckets = Lists.newArrayListWithCapacity(filters.length); - for (int i = 0; i < filters.length; i++) { - KeyedFilter filter = filters[i]; + for (int i = 0; i < keys.length; i++) { long bucketOrd = bucketOrd(owningBucketOrdinal, i); - InternalFilters.Bucket bucket = new InternalFilters.Bucket(filter.key, bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed); + InternalFilters.Bucket bucket = new InternalFilters.Bucket(keys[i], bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed); buckets.add(bucket); } return new InternalFilters(name, buckets, keyed, reducers(), metaData()); @@ -105,8 +111,8 @@ public class FiltersAggregator extends BucketsAggregator { public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = Lists.newArrayListWithCapacity(filters.length); - for (int i = 0; i < filters.length; i++) { - InternalFilters.Bucket bucket = new InternalFilters.Bucket(filters[i].key, 0, subAggs, keyed); + for (int i = 0; i < keys.length; i++) { + InternalFilters.Bucket bucket = new InternalFilters.Bucket(keys[i], 0, subAggs, keyed); buckets.add(bucket); } return new InternalFilters(name, buckets, keyed, reducers(), metaData()); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 0acf04210a1..e6a246162ce 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ 
b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; @@ -57,9 +56,9 @@ public class NestedAggregator extends SingleBucketAggregator { private DocIdSetIterator childDocs; private BitSet parentDocs; - public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List reducers, Map metaData, QueryCachingPolicy filterCachingPolicy) throws IOException { + public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parentAggregator, reducers, metaData); - childFilter = aggregationContext.searchContext().filterCache().cache(objectMapper.nestedTypeFilter(), null, filterCachingPolicy); + childFilter = objectMapper.nestedTypeFilter(); } @Override @@ -145,12 +144,10 @@ public class NestedAggregator extends SingleBucketAggregator { public static class Factory extends AggregatorFactory { private final String path; - private final QueryCachingPolicy queryCachingPolicy; - public Factory(String name, String path, QueryCachingPolicy queryCachingPolicy) { + public Factory(String name, String path) { super(name, InternalNested.TYPE.name()); this.path = path; - this.queryCachingPolicy = queryCachingPolicy; } @Override @@ -170,7 +167,7 @@ public class NestedAggregator extends SingleBucketAggregator { if (!objectMapper.nested().isNested()) { throw new 
AggregationExecutionException("[nested] nested path [" + path + "] is not nested"); } - return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData, queryCachingPolicy); + return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData); } private final static class Unmapped extends NonCollectingAggregator { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java index 56da7f51b17..ddf6bf17b6e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java @@ -64,6 +64,6 @@ public class NestedParser implements Aggregator.Parser { parser.getTokenLocation()); } - return new NestedAggregator.Factory(aggregationName, path, context.queryParserService().autoFilterCachePolicy()); + return new NestedAggregator.Factory(aggregationName, path); } } diff --git a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java index e65be1c314b..94071f8f6bd 100644 --- a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java +++ b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java @@ -36,10 +36,13 @@ public class CachedDfSource extends IndexSearcher { private final int maxDoc; - public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity) throws IOException { + public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity, + QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) throws IOException { super(reader); this.aggregatedDfs = aggregatedDfs; setSimilarity(similarity); + setQueryCache(queryCache); + setQueryCachingPolicy(queryCachingPolicy); if (aggregatedDfs.maxDoc() > Integer.MAX_VALUE) { maxDoc = 
Integer.MAX_VALUE; } else { diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 5eef114e5cb..0037bf322c3 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -390,7 +390,7 @@ public class FetchPhase implements SearchPhase { parentFilter = Queries.newNonNestedFilter(); } - Filter childFilter = context.filterCache().cache(nestedObjectMapper.nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + Filter childFilter = nestedObjectMapper.nestedTypeFilter(); if (childFilter == null) { current = nestedParentObjectMapper; continue; diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java index 92968309fcc..05cbb4e178a 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java @@ -22,8 +22,6 @@ import com.google.common.collect.Maps; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.InternalSearchHit; @@ -42,14 +40,12 @@ public interface FetchSubPhase { private LeafReaderContext readerContext; private int docId; private Map cache; - private IndexSearcher atomicIndexSearcher; public void reset(InternalSearchHit hit, LeafReaderContext context, int docId, IndexReader topLevelReader) { this.hit = hit; this.readerContext = context; this.docId = docId; this.topLevelReader = topLevelReader; - this.atomicIndexSearcher = null; } public InternalSearchHit hit() { @@ -64,15 +60,6 @@ public 
interface FetchSubPhase { return readerContext; } - public IndexSearcher searcher() { - if (atomicIndexSearcher == null) { - // Use the reader directly otherwise the IndexSearcher assertion will trip because it expects a top level - // reader context. - atomicIndexSearcher = new IndexSearcher(readerContext.reader()); - } - return atomicIndexSearcher; - } - public int docId() { return docId; } diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 6f36da8ee83..8f0e2a1799c 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; @@ -129,7 +130,7 @@ public final class InnerHitsContext { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } BitDocIdSetFilter parentFilter = context.bitsetFilterCache().getBitDocIdSetFilter(rawParentFilter); - Filter childFilter = context.filterCache().cache(childObjectMapper.nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + Filter childFilter = childObjectMapper.nestedTypeFilter(); Query q = new FilteredQuery(query, new NestedChildrenFilter(parentFilter, childFilter, hitContext)); if (size() == 0) { @@ -166,6 +167,28 @@ public final class InnerHitsContext { this.atomicReader = hitContext.readerContext().reader(); } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + NestedChildrenFilter other = (NestedChildrenFilter) obj; + return 
parentFilter.equals(other.parentFilter) + && childFilter.equals(other.childFilter) + && docId == other.docId + && atomicReader.getCoreCacheKey() == other.atomicReader.getCoreCacheKey(); + } + + @Override + public int hashCode() { + int hash = super.hashCode(); + hash = 31 * hash + parentFilter.hashCode(); + hash = 31 * hash + childFilter.hashCode(); + hash = 31 * hash + docId; + hash = 31 * hash + atomicReader.getCoreCacheKey().hashCode(); + return hash; + } + @Override public String toString(String field) { return "NestedChildren(parent=" + parentFilter + ",child=" + childFilter + ")"; @@ -283,7 +306,7 @@ public final class InnerHitsContext { term = (String) fieldsVisitor.fields().get(ParentFieldMapper.NAME).get(0); } } - Filter filter = Queries.wrap(new TermQuery(new Term(field, term))); // Only include docs that have the current hit as parent + Filter filter = new QueryWrapperFilter(new TermQuery(new Term(field, term))); // Only include docs that have the current hit as parent Filter typeFilter = documentMapper.typeFilter(); // Only include docs that have this inner hits type. BooleanQuery filteredQuery = new BooleanQuery(); diff --git a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index 8b227e1d224..f30a0545d95 100644 --- a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter { //we highlight every value separately calling the highlight method multiple times, only if we need to have back a snippet per value (whole value) int values = mergeValues ? 
1 : textsToHighlight.size(); for (int i = 0; i < values; i++) { - Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.searcher(), hitContext.docId(), numberOfFragments); + Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.reader(), hitContext.docId(), numberOfFragments); if (fieldSnippets != null) { for (Snippet fieldSnippet : fieldSnippets) { if (Strings.hasText(fieldSnippet.getText())) { diff --git a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 0e38c150030..482e3ef9153 100644 --- a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -20,9 +20,10 @@ package org.elasticsearch.search.internal; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; @@ -151,7 +152,8 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { // this will only get applied to the actual search collector and not // to any scoped collectors, also, it will only be applied to the main collector // since that is where the filter should only work - collector = new FilteredCollector(collector, searchContext.parsedPostFilter().filter()); + final Weight filterWeight = createNormalizedWeight(searchContext.parsedPostFilter().filter(), false); + collector = new FilteredCollector(collector, filterWeight); } if (queryCollectors != null 
&& !queryCollectors.isEmpty()) { ArrayList allCollectors = new ArrayList<>(queryCollectors.values()); @@ -194,7 +196,9 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { if (searchContext.aliasFilter() == null) { return super.explain(query, doc); } - FilteredQuery filteredQuery = new FilteredQuery(query, searchContext.aliasFilter()); + BooleanQuery filteredQuery = new BooleanQuery(); + filteredQuery.add(query, Occur.MUST); + filteredQuery.add(searchContext.aliasFilter(), Occur.FILTER); return super.explain(filteredQuery, doc); } finally { searchContext.clearReleasables(Lifetime.COLLECTION); diff --git a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index f0839688761..cd50594dc0e 100644 --- a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; @@ -44,7 +43,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; @@ -235,14 +233,17 @@ public class DefaultSearchContext extends SearchContext { if (queryBoost() != 1.0f) { parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), 
new BoostScoreFunction(queryBoost)), parsedQuery())); } - Filter searchFilter = searchFilter(types()); + Query searchFilter = searchFilter(types()); if (searchFilter != null) { if (Queries.isConstantMatchAllQuery(query())) { Query q = new ConstantScoreQuery(searchFilter); q.setBoost(query().getBoost()); parsedQuery(new ParsedQuery(q, parsedQuery())); } else { - parsedQuery(new ParsedQuery(new FilteredQuery(query(), searchFilter), parsedQuery())); + BooleanQuery filtered = new BooleanQuery(); + filtered.add(query(), Occur.MUST); + filtered.add(searchFilter, Occur.FILTER); + parsedQuery(new ParsedQuery(filtered, parsedQuery())); } } } @@ -255,12 +256,12 @@ public class DefaultSearchContext extends SearchContext { } BooleanQuery bq = new BooleanQuery(); if (filter != null) { - bq.add(filterCache().cache(filter, null, indexService.queryParserService().autoFilterCachePolicy()), Occur.MUST); + bq.add(filter, Occur.MUST); } if (aliasFilter != null) { bq.add(aliasFilter, Occur.MUST); } - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } @Override @@ -480,11 +481,6 @@ public class DefaultSearchContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return indexService.cache().filter(); - } - @Override public BitsetFilterCache bitsetFilterCache() { return indexService.bitsetFilterCache(); diff --git a/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 3a6a48531f0..7fe5373b5e5 100644 --- a/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.internal; -import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; @@ -79,7 +78,7 @@ public abstract class 
FilteredSearchContext extends SearchContext { } @Override - public Filter searchFilter(String[] types) { + public Query searchFilter(String[] types) { return in.searchFilter(types); } @@ -288,11 +287,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.bigArrays(); } - @Override - public FilterCache filterCache() { - return in.filterCache(); - } - @Override public BitsetFilterCache bitsetFilterCache() { return in.bitsetFilterCache(); @@ -364,7 +358,7 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public Filter aliasFilter() { + public Query aliasFilter() { return in.aliasFilter(); } diff --git a/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/src/main/java/org/elasticsearch/search/internal/SearchContext.java index f5377c98040..1ae74abaaf9 100644 --- a/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.internal; import com.google.common.collect.Iterables; import com.google.common.collect.Multimap; import com.google.common.collect.MultimapBuilder; -import org.apache.lucene.search.Filter; + import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; @@ -34,7 +34,6 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMappers; @@ -43,7 +42,6 @@ import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.ParsedFilter; import org.elasticsearch.index.query.ParsedQuery; 
import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; @@ -112,7 +110,7 @@ public abstract class SearchContext implements Releasable { */ public abstract void preProcess(); - public abstract Filter searchFilter(String[] types); + public abstract Query searchFilter(String[] types); public abstract long id(); @@ -213,8 +211,6 @@ public abstract class SearchContext implements Releasable { public abstract BigArrays bigArrays(); - public abstract FilterCache filterCache(); - public abstract BitsetFilterCache bitsetFilterCache(); public abstract IndexFieldDataService fieldData(); @@ -243,7 +239,7 @@ public abstract class SearchContext implements Releasable { public abstract ParsedFilter parsedPostFilter(); - public abstract Filter aliasFilter(); + public abstract Query aliasFilter(); public abstract SearchContext parsedQuery(ParsedQuery query); diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java index 4d8f618ba79..5e6a6df5fc0 100644 --- a/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java +++ b/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.search.lookup; -import org.apache.lucene.index.CompositeReader; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.util.MinimalMap; @@ -40,14 +40,11 @@ public class LeafIndexLookup extends MinimalMap { 
// The parent reader from which we can get proper field and term // statistics - private final CompositeReader parentReader; + private final IndexReader parentReader; // we need this later to get the field and term statistics of the shard private final IndexSearcher indexSearcher; - // we need this later to get the term statistics of the shard - private final IndexReaderContext indexReaderContext; - // current docId private int docId = -1; @@ -90,15 +87,9 @@ public class LeafIndexLookup extends MinimalMap { public LeafIndexLookup(LeafReaderContext ctx) { reader = ctx.reader(); - if (ctx.parent != null) { - parentReader = ctx.parent.reader(); - indexSearcher = new IndexSearcher(parentReader); - indexReaderContext = ctx.parent; - } else { - parentReader = null; - indexSearcher = null; - indexReaderContext = null; - } + parentReader = ReaderUtil.getTopLevelContext(ctx).reader(); + indexSearcher = new IndexSearcher(parentReader); + indexSearcher.setQueryCache(null); } public void setDocument(int docId) { @@ -175,13 +166,10 @@ public class LeafIndexLookup extends MinimalMap { } public IndexSearcher getIndexSearcher() { - if (indexSearcher == null) { - return new IndexSearcher(reader); - } return indexSearcher; } public IndexReaderContext getReaderContext() { - return indexReaderContext; + return getParentReader().getContext(); } } diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 4993c426629..ae16834f7af 100644 --- a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -166,9 +166,9 @@ public class GeoDistanceSortParser implements SortParser { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { - innerDocumentsFilter = 
context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index 7caf89e9c08..651e5ab3a8f 100644 --- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -139,9 +139,9 @@ public class ScriptSortParser implements SortParser { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index aa2f1315960..3dcaf5a4896 100644 --- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -255,9 +255,9 @@ public class 
SortParseElement implements SearchParseElement { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java index 01abea2b4f4..43165fa4b1c 100644 --- a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java +++ b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java @@ -82,7 +82,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -90,15 +90,15 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0); highlighter.setBreakIterator(new WholeBreakIterator()); - 
Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is a test. Just a test highlighting from postings highlighter.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the second value to perform highlighting on.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the third value to test highlighting with postings.")); @@ -106,15 +106,15 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //let's try without whole break iterator as well, to prove that highlighting works the same when working per value (not optimized though) highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("Just a test highlighting from postings highlighter.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the second value to perform highlighting on.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = 
highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the third value to test highlighting with postings.")); @@ -177,7 +177,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -190,7 +190,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { boolean mergeValues = true; CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(4)); @@ -205,7 +205,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); List snippets2 = new ArrayList<>(); for (int i = 0; i < fieldValues.size(); i++) { - snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, searcher, docId, 5))); + snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, ir, docId, 5))); } assertThat(snippets2.size(), equalTo(4)); @@ -292,7 +292,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - 
TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -305,7 +305,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { boolean mergeValues = true; CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(4)); @@ -379,7 +379,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("none", "highlighting")); IndexSearcher searcher = newSearcher(ir); SortedSet queryTerms = extractTerms(searcher, query); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -392,7 +392,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //no snippets with simulated require field match (we filter the terms ourselves) boolean requireFieldMatch = true; BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch); - Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(0)); @@ -400,7 +400,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //one snippet without require field match, just passing in the query terms with no filtering on our side requireFieldMatch = false; filteredQueryTerms = filterTerms(queryTerms, 
"body", requireFieldMatch); - snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("Just a test highlighting from postings.")); @@ -435,7 +435,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { IndexSearcher searcher = newSearcher(ir); SortedSet queryTerms = extractTerms(searcher, query); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -446,11 +446,11 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder()); CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(0)); highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, scaledRandomIntBetween(1, 10)); - snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is a test.")); diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java index b20e544866d..7bd77155962 100644 --- 
a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java +++ b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java @@ -26,8 +26,6 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java index 9a319f77b9e..1e533ef5333 100644 --- a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java +++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java @@ -195,7 +195,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { .get(); assertSearchResponse(searchResponse); Global global = searchResponse.getAggregations().get("global"); - Terms terms = global.getAggregations().get("test"); + Terms terms = global.getAggregations().get("test");System.out.println(searchResponse); assertThat(terms.getBuckets().size(), equalTo(4)); logger.info("--> checking single filtering alias search with global facets and sort"); @@ -948,7 +948,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { @Test public void testAliasFilterWithNowInRangeFilterAndQuery() throws Exception { assertAcked(prepareCreate("my-index").addMapping("my-type", "_timestamp", "enabled=true")); - assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeFilter("_timestamp").cache(randomBoolean()).from("now-1d").to("now"))); + assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeFilter("_timestamp").from("now-1d").to("now"))); assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", 
queryFilter(rangeQuery("_timestamp").from("now-1d").to("now")))); final int numDocs = scaledRandomIntBetween(5, 52); diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java index 086966cef6b..8445f8a1f45 100644 --- a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java +++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.common.StopWatch; @@ -211,7 +210,7 @@ public class TimeDataHistogramAggregationBenchmark { private static SearchResponse doTermsAggsSearch(String name, String field, float matchPercentage) { SearchResponse response = client.prepareSearch() .setSize(0) - .setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.scriptFilter("random() terms) { + throw new UnsupportedOperationException(); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public float getValueForNormalization() throws IOException { + return 0; + } + + @Override + public void normalize(float norm, float topLevelBoost) { + } + + @Override + public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + return null; + } + + }; + } + } + + public void testBasics() throws IOException { + DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); + 
QueryUtils.check(query); + + Query rewritten = query.rewrite(new MultiReader(new IndexReader[0])); + QueryUtils.check(rewritten); + QueryUtils.checkUnequal(query, rewritten); + + Query rewritten2 = query.rewrite(new MultiReader(new IndexReader[0])); + QueryUtils.check(rewritten2); + QueryUtils.checkUnequal(rewritten, rewritten2); + } + + public void testCache() throws IOException { + Directory dir = newDirectory(); + LRUQueryCache cache = new LRUQueryCache(10000, Long.MAX_VALUE); + QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE; + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); + for (int i = 0; i < 10; ++i) { + writer.addDocument(new Document()); + } + + IndexReader reader = writer.getReader(); + IndexSearcher searcher = newSearcher(reader); + searcher.setQueryCache(cache); + searcher.setQueryCachingPolicy(policy); + + assertEquals(0, cache.getCacheSize()); + DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); + searcher.count(query); + int expectedCacheSize = reader.leaves().size(); + assertEquals(expectedCacheSize, cache.getCacheSize()); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + + writer.addDocument(new Document()); + + DirectoryReader reader2 = writer.getReader(); + searcher = newSearcher(reader2); + searcher.setQueryCache(cache); + searcher.setQueryCachingPolicy(policy); + + // since the query is only cacheable at the index level, it has to be recomputed on all leaves + expectedCacheSize += reader2.leaves().size(); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + + reader.close(); + reader2.close(); + writer.close(); + assertEquals(0, cache.getCacheSize()); + dir.close(); + } + +} diff --git a/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java b/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java new file mode 100644 
index 00000000000..f6873d0c075 --- /dev/null +++ b/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class ShardCoreKeyMapTests extends ElasticsearchTestCase { + + public void testMissingShard() throws IOException { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + w.addDocument(new Document()); + try (IndexReader reader = w.getReader()) { + ShardCoreKeyMap map = new ShardCoreKeyMap(); + for (LeafReaderContext ctx : reader.leaves()) { + try { 
+ map.add(ctx.reader()); + fail(); + } catch (IllegalArgumentException expected) { + // ok + } + } + } + } + } + + public void testBasics() throws IOException { + Directory dir1 = newDirectory(); + RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1); + w1.addDocument(new Document()); + + Directory dir2 = newDirectory(); + RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2); + w2.addDocument(new Document()); + + Directory dir3 = newDirectory(); + RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3); + w3.addDocument(new Document()); + + ShardId shardId1 = new ShardId("index1", 1); + ShardId shardId2 = new ShardId("index1", 3); + ShardId shardId3 = new ShardId("index2", 2); + + ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1); + ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2); + ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3); + + ShardCoreKeyMap map = new ShardCoreKeyMap(); + for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) { + for (LeafReaderContext ctx : reader.leaves()) { + map.add(ctx.reader()); + } + } + assertEquals(3, map.size()); + + // Adding them back is a no-op + for (LeafReaderContext ctx : reader1.leaves()) { + map.add(ctx.reader()); + } + assertEquals(3, map.size()); + + for (LeafReaderContext ctx : reader2.leaves()) { + assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey())); + } + + w1.addDocument(new Document()); + ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1); + reader1.close(); + reader1 = newReader1; + + // same for reader2, but with a force merge to trigger evictions + w2.addDocument(new Document()); + w2.forceMerge(1); + ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2); + reader2.close(); + reader2 = newReader2; + + for 
(DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) { + for (LeafReaderContext ctx : reader.leaves()) { + map.add(ctx.reader()); + } + } + + final Set index1Keys = new HashSet<>(); + for (DirectoryReader reader : Arrays.asList(reader1, reader2)) { + for (LeafReaderContext ctx : reader.leaves()) { + index1Keys.add(ctx.reader().getCoreCacheKey()); + } + } + index1Keys.removeAll(map.getCoreKeysForIndex("index1")); + assertEquals(Collections.emptySet(), index1Keys); + + reader1.close(); + w1.close(); + reader2.close(); + w2.close(); + reader3.close(); + w3.close(); + assertEquals(0, map.size()); + + dir1.close(); + dir2.close(); + dir3.close(); + } + +} diff --git a/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 38054992298..5aabe4a594c 100644 --- a/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; @@ -158,7 +159,7 @@ public class FreqTermsEnumTests extends ElasticsearchTestCase { } } } - filter = Queries.wrap(new TermsQuery(filterTerms)); + filter = new QueryWrapperFilter(new TermsQuery(filterTerms)); } private void addFreqs(Document doc, Map reference) { diff --git a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java index 3e8b6dd8218..d06c2613e19 100644 --- a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java +++ b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java @@ 
-373,26 +373,6 @@ public class CountQueryTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareCount().setQuery(bool).get(), 1l); } - @Test - public void testFiltersWithCustomCacheKey() throws Exception { - createIndex("test"); - ensureGreen(); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); - refresh(); - - CountResponse countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); - assertHitCount(countResponse, 1l); - } - @Test public void testMatchQueryNumeric() throws Exception { createIndex("test"); diff --git a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java index 95f95defec2..ecf36582437 100644 --- a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java +++ b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java @@ -66,8 +66,8 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { assertThat(indexAliasesService.hasAlias("dogs"), equalTo(true)); assertThat(indexAliasesService.hasAlias("turtles"), equalTo(false)); - assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("cache(QueryWrapperFilter(animal:cat))")); - assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:cat)) 
cache(QueryWrapperFilter(animal:dog)))")); + assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("QueryWrapperFilter(animal:cat)")); + assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:cat) QueryWrapperFilter(animal:dog))")); // Non-filtering alias should turn off all filters because filters are ORed assertThat(indexAliasesService.aliasFilter("all"), nullValue()); @@ -76,7 +76,7 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { indexAliasesService.add("cats", filter(termFilter("animal", "feline"))); indexAliasesService.add("dogs", filter(termFilter("animal", "canine"))); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:canine)) cache(QueryWrapperFilter(animal:feline)))")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))")); } @Test @@ -86,13 +86,13 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { indexAliasesService.add("dogs", filter(termFilter("animal", "dog"))); assertThat(indexAliasesService.aliasFilter(), nullValue()); - assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("cache(QueryWrapperFilter(animal:dog))")); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:dog)) cache(QueryWrapperFilter(animal:cat)))")); + assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("QueryWrapperFilter(animal:dog)")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:dog) QueryWrapperFilter(animal:cat))")); indexAliasesService.add("cats", filter(termFilter("animal", "feline"))); indexAliasesService.add("dogs", 
filter(termFilter("animal", "canine"))); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:canine)) cache(QueryWrapperFilter(animal:feline)))")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))")); } @Test(expected = InvalidAliasNameException.class) diff --git a/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java b/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java index 72b0134b4ca..a57e81ff7f3 100644 --- a/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java +++ b/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java @@ -31,11 +31,11 @@ import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.index.Index; import org.elasticsearch.test.ElasticsearchTestCase; @@ -72,7 +72,7 @@ public class BitSetFilterCacheTest extends ElasticsearchTestCase { IndexSearcher searcher = new IndexSearcher(reader); BitsetFilterCache cache = new BitsetFilterCache(new Index("test"), ImmutableSettings.EMPTY); - BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(Queries.wrap(new TermQuery(new Term("field", "value")))); + BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "value")))); TopDocs docs = searcher.search(new 
ConstantScoreQuery(filter), 1); assertThat(docs.totalHits, equalTo(3)); diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 2ac8608fa96..d97c0fe769c 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.engine; import com.carrotsearch.randomizedtesting.annotations.Repeat; import com.carrotsearch.randomizedtesting.annotations.Seed; import com.google.common.collect.ImmutableMap; + import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -31,7 +32,14 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.*; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexDeletionPolicy; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LiveIndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -260,7 +268,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, new TranslogHandler(shardId.index().getName())); + }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); return config; diff --git 
a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 87ea42c9dcf..69ae60591a6 100644 --- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -252,7 +253,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, null); + }, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); return config; diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 5c29fe57713..b2964807d87 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -91,7 +91,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.index.search.NumericRangeFieldDataFilter; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ParentConstantScoreQuery; import org.elasticsearch.index.search.geo.GeoDistanceFilter; import org.elasticsearch.index.search.geo.GeoPolygonFilter; @@ -861,7 +860,7 @@ public class 
SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -872,7 +871,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -883,7 +882,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -908,14 +907,14 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), boolFilter().must(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")).mustNot(termFilter("name.first", "shay2")).should(termFilter("name.first", "shay3")))).query(); BooleanQuery filter = new BooleanQuery(); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", 
"shay4"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); filter.setMinimumNumberShouldMatch(1); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(filter)); + new QueryWrapperFilter(filter)); assertEquals(expected, parsedQuery); } @@ -926,14 +925,14 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery filter = new BooleanQuery(); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); filter.setMinimumNumberShouldMatch(1); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", 
"shay")), - Queries.wrap(filter)); + new QueryWrapperFilter(filter)); assertEquals(expected, parsedQuery); } @@ -942,9 +941,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), andFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(and)); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -954,11 +953,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -968,11 +967,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = 
copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-named.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -982,11 +981,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter2.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -995,9 +994,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), orFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - 
or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(or)); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1007,11 +1006,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(or)); + new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1021,11 +1020,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter2.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); FilteredQuery expected = new FilteredQuery( 
new TermQuery(new Term("name.first", "shay")), - Queries.wrap(or)); + new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1033,7 +1032,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testNotFilteredQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), notFilter(termFilter("name.first", "shay1")))).query(); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1045,7 +1044,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertThat(parsedQuery, instanceOf(FilteredQuery.class)); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1056,7 +1055,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1067,7 +1066,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new 
FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -2497,9 +2496,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = indexService.queryParserService(); Query parsedQuery = queryParser.parse(query).query(); assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class)); - assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(CustomQueryWrappingFilter.class)); - assertThat(((CustomQueryWrappingFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery(), instanceOf(ParentConstantScoreQuery.class)); - assertThat(((CustomQueryWrappingFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery().toString(), equalTo("parent_filter[foo](filtered(*:*)->cache(QueryWrapperFilter(_type:foo)))")); + assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(QueryWrapperFilter.class)); + assertThat(((QueryWrapperFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery(), instanceOf(ParentConstantScoreQuery.class)); + assertThat(((QueryWrapperFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery().toString(), equalTo("parent_filter[foo](filtered(*:*)->QueryWrapperFilter(_type:foo))")); SearchContext.removeCurrent(); } diff --git a/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java b/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java index 21bae1d20ba..d6aa83c341b 100644 --- a/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java @@ -135,10 +135,6 @@ public abstract class AbstractChildTests extends ElasticsearchSingleNodeTest { } } - static Filter wrap(Filter 
filter) { - return SearchContext.current().filterCache().cache(filter, null, SearchContext.current().indexShard().indexService().queryParserService().autoFilterCachePolicy()); - } - static BitDocIdSetFilter wrapWithBitSetFilter(Filter filter) { return SearchContext.current().bitsetFilterCache().getBitDocIdSetFilter(filter); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java index 4f7d62e1283..6dff9747127 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java @@ -38,6 +38,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; @@ -92,7 +93,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { Query childQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); Query query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } @@ 
-125,7 +126,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { )); TermQuery childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3)))); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); int shortCircuitParentDocSet = random().nextInt(5); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java index 3f2d4413984..52ffbf022ea 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java @@ -45,6 +45,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -109,7 +110,7 @@ public class ChildrenQueryTests extends AbstractChildTests { ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)]; ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new 
Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); int minChildren = random().nextInt(10); int maxChildren = scaledRandomIntBetween(minChildren, 10); Query query = new ChildrenQuery(parentChildIndexFieldData, "parent", "child", parentFilter, childQuery, scoreType, minChildren, diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java index 49496d8f6e6..feb320942b0 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java @@ -38,13 +38,13 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.Uid; @@ -93,7 +93,7 @@ public class ParentConstantScoreQueryTests extends AbstractChildTests { Query parentQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter childrenFilter = 
wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); + BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); Query query = new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java index 79b4a9bc79e..0614a6c2439 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java @@ -42,6 +42,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.join.BitDocIdSetFilter; @@ -49,7 +50,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.Uid; @@ -96,7 +96,7 @@ public class ParentQueryTests extends AbstractChildTests { Query parentQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter childrenFilter = 
wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); + BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); Query query = new ParentQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java index 4af03801c94..940e10e77df 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -216,8 +217,8 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData MultiValueMode sortMode = MultiValueMode.SUM; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter)); ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new 
BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); @@ -251,7 +252,7 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData assertThat(topDocs.scoreDocs[4].doc, equalTo(3)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9)); - childFilter = Queries.wrap(new TermQuery(new Term("filter_1", "T"))); + childFilter = new QueryWrapperFilter(new TermQuery(new Term("filter_1", "T"))); nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new FilteredQuery(new MatchAllDocsQuery(), childFilter), @@ -328,7 +329,7 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index 800320323cc..12776cec73a 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import 
org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -69,7 +70,7 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTests { @Override protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index b1b1433cdfc..12cd10a2cd2 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -68,7 +69,7 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests { protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException { 
MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 1029523a325..e4885727434 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -117,8 +118,8 @@ public class NestedSortingTests extends AbstractFieldDataTests { } private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException { - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(new TermQuery(new Term("__type", "child"))); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "child"))); XFieldComparatorSource nestedComparatorSource = 
indexFieldData.comparatorSource(missingValue, sortMode, createNested(parentFilter, childFilter)); Query query = new ConstantScoreQuery(parentFilter); Sort sort = new Sort(new SortField("f", nestedComparatorSource, reverse)); @@ -283,8 +284,8 @@ public class NestedSortingTests extends AbstractFieldDataTests { MultiValueMode sortMode = MultiValueMode.MIN; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); PagedBytesIndexFieldData indexFieldData = getForField("field2"); - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter)); ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); @@ -324,7 +325,7 @@ public class NestedSortingTests extends AbstractFieldDataTests { BooleanQuery bq = new BooleanQuery(); bq.add(parentFilter, Occur.MUST_NOT); bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST); - childFilter = Queries.wrap(bq); + childFilter = new QueryWrapperFilter(bq); nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new FilteredQuery(new MatchAllDocsQuery(), childFilter), diff --git a/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java index 36bdfd15af6..9013156a59b 100644 --- a/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java +++ 
b/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.cache.query; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; diff --git a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java index 4ba7f711429..d992991fa18 100644 --- a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java +++ b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java @@ -19,12 +19,13 @@ package org.elasticsearch.indices.stats; -import org.apache.lucene.util.Version; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; +import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -39,11 +40,13 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; +import org.elasticsearch.index.cache.filter.FilterCacheStats; import 
org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.query.FilterBuilders; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.sort.SortOrder; @@ -59,9 +62,7 @@ import java.util.Random; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -77,39 +78,12 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. 
Thread.sleep for 60s is bad return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put("indices.cache.filter.clean_interval", "1ms") .put(IndicesQueryCache.INDICES_CACHE_QUERY_CLEAN_INTERVAL, "1ms") .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .build(); } - @Test - public void testClearCacheFilterKeys() { - client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); - ensureGreen(); - client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); - - NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - - SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), FilterBuilders.termFilter("field", "value").cacheKey("test_key"))).execute().actionGet(); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); - indicesStats = 
client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); - - client().admin().indices().prepareClearCache().setFilterKeys("test_key").execute().actionGet(); - nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - } - - @Test public void testFieldDataStats() { client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); @@ -991,4 +965,90 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { } } + private void assertEquals(FilterCacheStats stats1, FilterCacheStats stats2) { + assertEquals(stats1.getCacheCount(), stats2.getCacheCount()); + assertEquals(stats1.getCacheSize(), stats2.getCacheSize()); + assertEquals(stats1.getEvictions(), stats2.getEvictions()); + assertEquals(stats1.getHitCount(), stats2.getHitCount()); + assertEquals(stats1.getMemorySizeInBytes(), stats2.getMemorySizeInBytes()); + assertEquals(stats1.getMissCount(), stats2.getMissCount()); + assertEquals(stats1.getTotalCount(), stats2.getTotalCount()); + } + + private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) { + assertAllSuccessful(response); + FilterCacheStats total = response.getTotal().filterCache; + FilterCacheStats indexTotal = new FilterCacheStats(); + FilterCacheStats shardTotal = new FilterCacheStats(); + for (IndexStats indexStats : response.getIndices().values()) { + 
indexTotal.add(indexStats.getTotal().filterCache); + } + for (ShardStats shardStats : response.getShards()) { + shardTotal.add(shardStats.getStats().filterCache); + } + assertEquals(total, indexTotal); + assertEquals(total, shardTotal); + } + + public void testFilterCacheStats() throws Exception { + assertAcked(prepareCreate("index").setSettings("number_of_replicas", 0).get()); + indexRandom(true, + client().prepareIndex("index", "type", "1").setSource("foo", "bar"), + client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + ensureGreen(); + + IndicesStatsResponse response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertEquals(0, response.getTotal().filterCache.getCacheSize()); + + SearchResponse r; + assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + + assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + 
+ assertTrue(client().prepareDelete("index", "type", "1").get().isFound()); + assertTrue(client().prepareDelete("index", "type", "2").get().isFound()); + refresh(); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getCacheCount(), greaterThan(0L)); + + indexRandom(true, + client().prepareIndex("index", "type", "1").setSource("foo", "bar"), + client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), greaterThan(0L)); + + assertAllSuccessful(client().admin().indices().prepareClearCache("index").setFilterCache(true).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + 
assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), equalTo(0L)); + } + } diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index bd664694c9f..f2487ec9e4f 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -346,7 +346,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { .addAlias(new Alias("templated_alias-{index}")) .addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}")) .addAlias(new Alias("complex_filtered_alias") - .filter(FilterBuilders.termsFilter("_type", "typeX", "typeY", "typeZ").execution("bool").cache(true))) + .filter(FilterBuilders.termsFilter("_type", "typeX", "typeY", "typeZ").execution("bool"))) .get(); assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ")); diff --git a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java index deb955ed51c..5802d20f1dd 100644 --- a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java +++ b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; + import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; diff --git 
a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java index f585824a267..855f21de852 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -123,7 +122,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeTest { AggregationContext context = new AggregationContext(searchContext); AggregatorFactories.Builder builder = AggregatorFactories.builder(); - builder.addAggregator(new NestedAggregator.Factory("test", "nested_field", QueryCachingPolicy.ALWAYS_CACHE)); + builder.addAggregator(new NestedAggregator.Factory("test", "nested_field")); AggregatorFactories factories = builder.build(); searchContext.aggregations(new SearchContextAggregations(factories)); Aggregator[] aggs = factories.createTopLevelAggregators(context); diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index 44b57045965..357a19afa60 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import 
org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.FieldMapper.Loading; import org.elasticsearch.index.mapper.MergeMappingException; @@ -73,9 +73,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import static com.google.common.collect.Maps.newHashMap; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath; -import static org.elasticsearch.common.settings.ImmutableSettings.builder; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.boolFilter; @@ -106,7 +104,14 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; /** * @@ -118,7 +123,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { 
protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // aggressive filter caching so that we can assert on the filter cache size - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } @@ -418,15 +423,15 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { for (int i = 1; i <= 10; i++) { logger.info("Round {}", i); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())).cache(true))).execute() + .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())))).execute() .actionGet(); assertNoFailures(searchResponse); searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")).cache(true))) + .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")))) .get(); assertNoFailures(searchResponse); searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")).cache(true))) + .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")))) .get(); assertNoFailures(searchResponse); } @@ -843,7 +848,8 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max")) .get(); assertThat(explainResponse.isExists(), equalTo(true)); - assertThat(explainResponse.getExplanation().toString(), equalTo("1.0 = sum of:\n 1.0 = not implemented yet...\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = Match on id 
0\n")); + // TODO: improve test once explanations are actually implemented + assertThat(explainResponse.getExplanation().toString(), startsWith("1.0 =")); } List createDocBuilders() { @@ -1085,41 +1091,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { assertSearchHit(searchResponse, 1, hasId("2")); } - @Test - public void testHasChildAndHasParentWrappedInAQueryFilterShouldNeverGetCached() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(ImmutableSettings.builder().put("index.cache.filter.type", "weighted")) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get(); - client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get(); - refresh(); - - for (int i = 0; i < 10; i++) { - SearchResponse searchResponse = client().prepareSearch("test") - .setExplain(true) - .setQuery(constantScoreQuery(boolFilter() - .must(queryFilter(hasChildQuery("child", matchQuery("c_field", 1)))) - .cache(true) - )).get(); - assertSearchHit(searchResponse, 1, hasId("1")); - // Can't start with ConstantScore(cache(BooleanFilter( - assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), startsWith("ConstantScore(CustomQueryWrappingFilter(")); - - searchResponse = client().prepareSearch("test") - .setExplain(true) - .setQuery(constantScoreQuery(boolFilter() - .must(queryFilter(boolQuery().must(matchAllQuery()).must(hasChildQuery("child", matchQuery("c_field", 1))))) - .cache(true) - )).get(); - assertSearchHit(searchResponse, 1, hasId("1")); - // Can't start with ConstantScore(cache(BooleanFilter( - assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), startsWith("ConstantScore(CustomQueryWrappingFilter(")); - } - } - @Test public void testSimpleQueryRewrite() throws Exception { assertAcked(prepareCreate("test") @@ -1797,8 +1768,7 @@ public class 
SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { SearchResponse searchResponse = client().prepareSearch() .setQuery(filteredQuery(matchAllQuery(), boolFilter() .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red"))) - .must(matchAllFilter()) - .cache(true))) + .must(matchAllFilter()))) .get(); assertThat(searchResponse.getHits().totalHits(), equalTo(2l)); } @@ -1810,8 +1780,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { SearchResponse searchResponse = client().prepareSearch() .setQuery(filteredQuery(matchAllQuery(), boolFilter() .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red"))) - .must(matchAllFilter()) - .cache(true))) + .must(matchAllFilter()))) .get(); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); @@ -1862,104 +1831,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } } - @Test - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/10897") - public void testValidateThatHasChildAndHasParentFilterAreNeverCached() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(builder().put(indexSettings()) - //we need 0 replicas here to make sure we always hit the very same shards - .put(SETTING_NUMBER_OF_REPLICAS, 0)) - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - client().prepareIndex("test", "parent", "1").setSource("field", "value") - .get(); - client().prepareIndex("test", "child", "1").setParent("1").setSource("field", "value") - .setRefresh(true) - .get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertHitCount(searchResponse, 1l); - - searchResponse = client().prepareSearch("test") - .setQuery(hasParentQuery("parent", matchAllQuery())) - .get(); - assertHitCount(searchResponse, 1l); - - // Internally the has_child and has_parent use filter for the type field, which end up 
in the filter cache, - // so by first checking how much they take by executing has_child and has_parent *query* we can set a base line - // for the filter cache size in this test. - IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - long initialCacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))) - .get(); - assertHitCount(searchResponse, 1l); - - statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize)); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))) - .get(); - assertHitCount(searchResponse, 1l); - - // filter cache should not contain any thing, b/c has_child and has_parent can't be cached. 
- statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize)); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.matchAllFilter()) - .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.matchAllFilter()) - .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - // filter cache should not contain any thing, b/c has_child and has_parent can't be cached. - statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize)); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.termFilter("field", "value").cache(true)) - .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.termFilter("field", "value").cache(true)) - .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - // filter cache should not contain any thing, b/c has_child and has_parent can't be cached. 
- statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(initialCacheSize)); - } - // https://github.com/elasticsearch/elasticsearch/issues/5783 @Test public void testQueryBeforeChildType() throws Exception { diff --git a/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java b/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java index 8e390c4dcfd..5bf2a8a38d6 100644 --- a/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java +++ b/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java @@ -31,12 +31,12 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext.NestedInnerHits.NestedChildrenFilter; import org.elasticsearch.test.ElasticsearchTestCase; @@ -79,8 +79,8 @@ public class NestedChildrenFilterTest extends ElasticsearchTestCase { IndexSearcher searcher = new IndexSearcher(reader); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(Queries.wrap(new TermQuery(new Term("type", "parent")))); - Filter childFilter = Queries.wrap(new TermQuery(new Term("type", "child"))); + BitDocIdSetFilter 
parentFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("type", "parent")))); + Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "child"))); int checkedParents = 0; for (LeafReaderContext leaf : reader.leaves()) { DocIdSetIterator parents = parentFilter.getDocIdSet(leaf).iterator(); diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java index 5157c235349..13f95320993 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java @@ -172,7 +172,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { SearchResponse responseWithWeights = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(termFilter(TEXT_FIELD, "value").cache(false)) + functionScoreQuery(termFilter(TEXT_FIELD, "value")) .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km")) .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN).setWeight(2)) .add(scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()").setWeight(3)) @@ -184,7 +184,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { responseWithWeights = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(termFilter(TEXT_FIELD, "value").cache(false)) + functionScoreQuery(termFilter(TEXT_FIELD, "value")) .add(weightFactorFunction(4.0f)) ).explain(true))).actionGet(); assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(), diff --git a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java index bf76e8c3b5a..4b7eeadf59e 100644 --- a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java +++ 
b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java @@ -28,7 +28,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; -import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; @@ -498,7 +497,6 @@ public class GeoFilterTests extends ElasticsearchIntegrationTest { Map expectedCounts = new HashMap<>(); Map expectedResults = new HashMap<>(); - Map cacheKeys = new HashMap<>(); expectedCounts.put(geoHashCellFilter("pin", geohash, false), 1L); @@ -516,19 +514,6 @@ public class GeoFilterTests extends ElasticsearchIntegrationTest { for (int j = filterBuilders.size() * 2 * randomIntBetween(1, 5); j > 0; j--) { Collections.shuffle(filterBuilders, getRandom()); for (GeohashCellFilter.Builder builder : filterBuilders) { - if (randomBoolean()) { - builder.cache(randomBoolean()); - } - if (randomBoolean()) { - String cacheKey = cacheKeys.get(builder); - if (cacheKey == null) { - cacheKey = randomUnicodeOfLength(6); - cacheKeys.put(builder, cacheKey); - } - builder.cacheKey(cacheKey); - } else { - builder.cacheKey(null); - } try { long expectedCount = expectedCounts.get(builder); SearchResponse response = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()) diff --git a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java index 8d8e948f769..e72cad5dfdd 100644 --- a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java +++ b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java @@ -808,10 +808,10 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { 
ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); + SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); assertHitCount(searchResponse, 1l); - searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); + searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); assertHitCount(searchResponse, 1l); searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java index 1f071e95bb5..286e7f72af8 100644 --- a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java @@ -20,12 +20,11 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -36,8 +35,6 @@ import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.scriptFilter; import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; /** @@ -50,7 +47,7 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // aggressive filter caching so that we can assert on the number of iterations of the script filters - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } @@ -116,58 +113,4 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { public static int incrementScriptCounter() { return scriptCounter.incrementAndGet(); } - - @Test - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/10897") - public void testCustomScriptCache() throws Exception { - assertAcked(prepareCreate("test").setSettings( - ImmutableSettings.settingsBuilder() - //needs to run without replicas to validate caching behaviour and make sure we always hit the very shame shard - .put(indexSettings()) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))); - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("test", "1").field("num", 1.0f).endObject()).execute().actionGet(); - flush(); - client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("test", "2").field("num", 2.0f).endObject()).execute().actionGet(); - 
flush(); - client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("test", "3").field("num", 3.0f).endObject()).execute().actionGet(); - flushAndRefresh(); - - String script = "org.elasticsearch.search.scriptfilter.ScriptFilterSearchTests.incrementScriptCounter() > 0"; - - scriptCounter.set(0); - logger.info("running script filter the first time"); - SearchResponse response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(3)); - - scriptCounter.set(0); - logger.info("running script filter the second time"); - response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "2"), scriptFilter(script).cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(0)); - - scriptCounter.set(0); - logger.info("running script filter with new parameters"); - response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).addParam("param1", "1").cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(3)); - - scriptCounter.set(0); - logger.info("running script filter with same parameters"); - response = client().prepareSearch() - .setQuery(filteredQuery(matchAllQuery(), scriptFilter(script).addParam("param1", "1").cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(3l)); - assertThat(scriptCounter.get(), equalTo(0)); - } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java index 457c9278222..c048e1f5ed1 100644 --- 
a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java +++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.search.sort; -import com.carrotsearch.randomizedtesting.annotations.Repeat; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -134,7 +132,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { ensureYellow(); SearchResponse allDocsResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.boolFilter().must(FilterBuilders.termFilter("foo", "bar"), - FilterBuilders.rangeFilter("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01").cache(randomBoolean())))) + FilterBuilders.rangeFilter("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")))) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(docs).get(); assertSearchResponse(allDocsResponse); @@ -143,7 +141,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numiters; i++) { SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.boolFilter().must(FilterBuilders.termFilter("foo", "bar"), - FilterBuilders.rangeFilter("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01").cache(randomBoolean())))) + FilterBuilders.rangeFilter("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01")))) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(scaledRandomIntBetween(1, docs)).get(); assertSearchResponse(searchResponse); diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 187afe1b658..109bd030023 100644 
--- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -114,7 +114,6 @@ import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.index.translog.fs.FsTranslogFile; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoverySettings; @@ -520,7 +519,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase if (random.nextBoolean()) { builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); - builder.put(IndicesFilterCache.INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); } if (random.nextBoolean()) { builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms"); diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 0f71b7239e0..02c02b2ed6e 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -30,7 +30,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import com.google.common.base.Predicate; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; import 
org.apache.lucene.util.LuceneTestCase.SuppressCodecs; @@ -60,7 +59,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.junit.*; import org.junit.rules.RuleChain; -import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Field; import java.nio.file.FileSystem; @@ -128,19 +126,6 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { protected void afterIfSuccessful() { } - // TODO: Parent/child and other things does not work with the query cache - // We must disable query cache for both suite and test to override lucene, but LTC resets it after the suite - - @BeforeClass - public static void disableQueryCacheSuite() { - IndexSearcher.setDefaultQueryCache(null); - } - - @Before - public final void disableQueryCache() { - IndexSearcher.setDefaultQueryCache(null); - } - // setup mock filesystems for this test run. we change PathUtils // so that all accesses are plumbed thru any mock wrappers diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 5d2d00c4870..4c857c24027 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -78,8 +78,8 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.cache.filter.none.NoneFilterCache; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.shard.IndexShardModule; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreModule; @@ -449,7 +449,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) 
{ - builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? WeightedFilterCache.class : NoneFilterCache.class); + builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexFilterCache.class : NoneFilterCache.class); } if (random.nextBoolean()) { diff --git a/src/test/java/org/elasticsearch/test/TestSearchContext.java b/src/test/java/org/elasticsearch/test/TestSearchContext.java index e1ccd525546..bcfa48a5813 100644 --- a/src/test/java/org/elasticsearch/test/TestSearchContext.java +++ b/src/test/java/org/elasticsearch/test/TestSearchContext.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.ParsedFilter; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; @@ -69,7 +68,6 @@ public class TestSearchContext extends SearchContext { final PageCacheRecycler pageCacheRecycler; final BigArrays bigArrays; final IndexService indexService; - final FilterCache filterCache; final IndexFieldDataService indexFieldDataService; final BitsetFilterCache fixedBitSetFilterCache; final ThreadPool threadPool; @@ -84,7 +82,6 @@ public class TestSearchContext extends SearchContext { this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays.withCircuitBreaking(); this.indexService = indexService; - this.filterCache = indexService.cache().filter(); this.indexFieldDataService = indexService.fieldData(); this.fixedBitSetFilterCache = indexService.bitsetFilterCache(); this.threadPool = threadPool; @@ -94,7 +91,6 @@ public class TestSearchContext extends SearchContext { this.pageCacheRecycler = null; this.bigArrays = null; this.indexService = null; - this.filterCache = null; 
this.indexFieldDataService = null; this.threadPool = null; this.fixedBitSetFilterCache = null; @@ -313,11 +309,6 @@ public class TestSearchContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return filterCache; - } - @Override public BitsetFilterCache bitsetFilterCache() { return fixedBitSetFilterCache; diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java index e0e1d2db4ea..b321a0dfbb2 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -24,6 +24,8 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; @@ -56,6 +58,8 @@ public final class MockEngineSupport { private final AtomicBoolean closing = new AtomicBoolean(false); private final ESLogger logger = Loggers.getLogger(Engine.class); private final ShardId shardId; + private final QueryCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; private final SearcherCloseable searcherCloseable; private final MockContext mockContext; @@ -78,13 +82,15 @@ public final class MockEngineSupport { public MockEngineSupport(EngineConfig config) { Settings indexSettings = config.getIndexSettings(); shardId = config.getShardId(); + filterCache = config.getFilterCache(); + filterCachingPolicy = config.getFilterCachingPolicy(); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); Random random = new Random(seed); final double ratio = 
indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow Class wrapper = indexSettings.getAsClass(READER_WRAPPER_TYPE, AssertingDirectoryReader.class); boolean wrapReader = random.nextDouble() < ratio; if (logger.isTraceEnabled()) { - logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), config.getShardId(), seed, wrapReader); + logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader); } mockContext = new MockContext(random, wrapReader, wrapper, indexSettings); this.searcherCloseable = new SearcherCloseable(); @@ -123,6 +129,8 @@ public final class MockEngineSupport { // this executes basic query checks and asserts that weights are normalized only once etc. final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); + assertingIndexSearcher.setQueryCache(filterCache); + assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; } From fe7d018f0cb431d16062f18d3f7552781caf4ea6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 4 May 2015 10:33:47 +0200 Subject: [PATCH 18/21] [TEST] make LuceneTest extraFS proof --- .../java/org/elasticsearch/common/lucene/LuceneTest.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java index 0420f4b2966..816409675af 100644 --- a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java +++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java @@ -144,8 +144,11 @@ public class LuceneTest extends ElasticsearchTestCase { } Lucene.cleanLuceneIndex(dir); if (dir.listAll().length > 0) { - assertEquals(dir.listAll().length, 1); - assertEquals(dir.listAll()[0], 
"write.lock"); + for (String file : dir.listAll()) { + if (file.startsWith("extra") == false) { + assertEquals(file, "write.lock"); + } + } } dir.close(); } @@ -200,7 +203,7 @@ public class LuceneTest extends ElasticsearchTestCase { assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0); for (String file : dir.listAll()) { - assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2")); + assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2") || file.startsWith("extra")); } open.close(); dir.close(); From 23ac32e616533137e8132124cadeb6ea08b78ea2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 29 Apr 2015 21:07:51 +0200 Subject: [PATCH 19/21] Remove old 0.90 shard allocator the `even_shard` allocator has been replaced years ago in early 0.90. We can remove it now in 2.0 since the new one is considered stable. --- .../allocator/EvenShardsCountAllocator.java | 245 ------------------ .../allocator/ShardsAllocatorModule.java | 11 +- .../ShardsAllocatorModuleTests.java | 13 +- 3 files changed, 13 insertions(+), 256 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java deleted file mode 100644 index 96b1a8e4de5..00000000000 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.routing.allocation.allocator; - -import com.carrotsearch.hppc.ObjectIntOpenHashMap; -import org.elasticsearch.cluster.routing.MutableShardRouting; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; - -import java.util.Arrays; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; - -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; - -/** - * A {@link ShardsAllocator} that tries to balance shards across nodes in the - * cluster such that each node holds approximatly the same number of shards. The - * allocations algorithm operates on a cluster ie. 
is index-agnostic. While the - * number of shards per node might be balanced across the cluster a single node - * can hold mulitple shards from a single index such that the shard of an index - * are not necessarily balanced across nodes. Yet, due to high-level - * {@link AllocationDecider decisions} multiple instances of the same shard - * won't be allocated on the same node. - *

    - * During {@link #rebalance(RoutingAllocation) re-balancing} the allocator takes - * shards from the most busy nodes and tries to relocate the shards to - * the least busy node until the number of shards per node are equal for all - * nodes in the cluster or until no shards can be relocated anymore. - *

    - */ -public class EvenShardsCountAllocator extends AbstractComponent implements ShardsAllocator { - - @Inject - public EvenShardsCountAllocator(Settings settings) { - super(settings); - } - - @Override - public void applyStartedShards(StartedRerouteAllocation allocation) { - } - - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) { - } - - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { - boolean changed = false; - RoutingNodes routingNodes = allocation.routingNodes(); - /* - * 1. order nodes by the number of shards allocated on them least one first (this takes relocation into account) - * ie. if a shard is relocating the target nodes shard count is incremented. - * 2. iterate over the unassigned shards - * 2a. find the least busy node in the cluster that allows allocation for the current unassigned shard - * 2b. if a node is found add the shard to the node and remove it from the unassigned shards - * 3. iterate over the remaining unassigned shards and try to allocate them on next possible node - */ - // order nodes by number of shards (asc) - RoutingNode[] nodes = sortedNodesLeastToHigh(allocation); - - Iterator unassignedIterator = routingNodes.unassigned().iterator(); - int lastNode = 0; - - while (unassignedIterator.hasNext()) { - MutableShardRouting shard = unassignedIterator.next(); - // do the allocation, finding the least "busy" node - for (int i = 0; i < nodes.length; i++) { - RoutingNode node = nodes[lastNode]; - lastNode++; - if (lastNode == nodes.length) { - lastNode = 0; - } - - Decision decision = allocation.deciders().canAllocate(shard, node, allocation); - if (decision.type() == Decision.Type.YES) { - int numberOfShardsToAllocate = routingNodes.requiredAverageNumberOfShardsPerNode() - node.size(); - if (numberOfShardsToAllocate <= 0) { - continue; - } - - changed = true; - allocation.routingNodes().assign(shard, node.nodeId()); - unassignedIterator.remove(); - break; - } - } - } - - // 
allocate all the unassigned shards above the average per node. - for (Iterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) { - MutableShardRouting shard = it.next(); - // go over the nodes and try and allocate the remaining ones - for (RoutingNode routingNode : sortedNodesLeastToHigh(allocation)) { - Decision decision = allocation.deciders().canAllocate(shard, routingNode, allocation); - if (decision.type() == Decision.Type.YES) { - changed = true; - allocation.routingNodes().assign(shard, routingNode.nodeId()); - it.remove(); - break; - } - } - } - return changed; - } - - @Override - public boolean rebalance(RoutingAllocation allocation) { - // take shards form busy nodes and move them to less busy nodes - boolean changed = false; - RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation); - if (sortedNodesLeastToHigh.length == 0) { - return false; - } - int lowIndex = 0; - int highIndex = sortedNodesLeastToHigh.length - 1; - boolean relocationPerformed; - do { - relocationPerformed = false; - while (lowIndex != highIndex) { - RoutingNode lowRoutingNode = sortedNodesLeastToHigh[lowIndex]; - RoutingNode highRoutingNode = sortedNodesLeastToHigh[highIndex]; - int averageNumOfShards = allocation.routingNodes().requiredAverageNumberOfShardsPerNode(); - - // only active shards can be removed so must count only active ones. 
- if (highRoutingNode.numberOfOwningShards() <= averageNumOfShards) { - highIndex--; - continue; - } - - if (lowRoutingNode.size() >= averageNumOfShards) { - lowIndex++; - continue; - } - - // Take a started shard from a "busy" node and move it to less busy node and go on - boolean relocated = false; - List startedShards = highRoutingNode.shardsWithState(STARTED); - for (MutableShardRouting startedShard : startedShards) { - Decision rebalanceDecision = allocation.deciders().canRebalance(startedShard, allocation); - if (rebalanceDecision.type() == Decision.Type.NO) { - continue; - } - - Decision allocateDecision = allocation.deciders().canAllocate(startedShard, lowRoutingNode, allocation); - if (allocateDecision.type() == Decision.Type.YES) { - changed = true; - allocation.routingNodes().assign(new MutableShardRouting(startedShard.index(), startedShard.id(), - lowRoutingNode.nodeId(), startedShard.currentNodeId(), startedShard.restoreSource(), - startedShard.primary(), INITIALIZING, startedShard.version() + 1), lowRoutingNode.nodeId()); - - allocation.routingNodes().relocate(startedShard, lowRoutingNode.nodeId()); - relocated = true; - relocationPerformed = true; - break; - } - } - - if (!relocated) { - highIndex--; - } - } - } while (relocationPerformed); - return changed; - } - - @Override - public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - if (!shardRouting.started()) { - return false; - } - boolean changed = false; - RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation); - if (sortedNodesLeastToHigh.length == 0) { - return false; - } - - for (RoutingNode nodeToCheck : sortedNodesLeastToHigh) { - // check if its the node we are moving from, no sense to check on it - if (nodeToCheck.nodeId().equals(node.nodeId())) { - continue; - } - Decision decision = allocation.deciders().canAllocate(shardRouting, nodeToCheck, allocation); - if (decision.type() == Decision.Type.YES) { - 
allocation.routingNodes().assign(new MutableShardRouting(shardRouting.index(), shardRouting.id(), - nodeToCheck.nodeId(), shardRouting.currentNodeId(), shardRouting.restoreSource(), - shardRouting.primary(), INITIALIZING, shardRouting.version() + 1), nodeToCheck.nodeId()); - - allocation.routingNodes().relocate(shardRouting, nodeToCheck.nodeId()); - changed = true; - break; - } - } - - return changed; - } - - private RoutingNode[] sortedNodesLeastToHigh(RoutingAllocation allocation) { - // create count per node id, taking into account relocations - final ObjectIntOpenHashMap nodeCounts = new ObjectIntOpenHashMap<>(); - for (RoutingNode node : allocation.routingNodes()) { - for (int i = 0; i < node.size(); i++) { - ShardRouting shardRouting = node.get(i); - String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId(); - nodeCounts.addTo(nodeId, 1); - } - } - RoutingNode[] nodes = allocation.routingNodes().toArray(); - Arrays.sort(nodes, new Comparator() { - @Override - public int compare(RoutingNode o1, RoutingNode o2) { - return nodeCounts.get(o1.nodeId()) - nodeCounts.get(o2.nodeId()); - } - }); - return nodes; - } -} diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java index 38f1e39f8d6..38d8c0a2d7b 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; @@ -27,7 +29,7 @@ import 
org.elasticsearch.gateway.GatewayAllocator; */ public class ShardsAllocatorModule extends AbstractModule { - public static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard"; + private static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard"; public static final String BALANCED_ALLOCATOR_KEY = "balanced"; // default @@ -37,13 +39,11 @@ public class ShardsAllocatorModule extends AbstractModule { private Class shardsAllocator; - public ShardsAllocatorModule(Settings settings) { this.settings = settings; shardsAllocator = loadShardsAllocator(settings); } - @Override protected void configure() { if (shardsAllocator == null) { @@ -56,10 +56,13 @@ public class ShardsAllocatorModule extends AbstractModule { private Class loadShardsAllocator(Settings settings) { final Class shardsAllocator; final String type = settings.get(TYPE_KEY, BALANCED_ALLOCATOR_KEY); + if (BALANCED_ALLOCATOR_KEY.equals(type)) { shardsAllocator = BalancedShardsAllocator.class; } else if (EVEN_SHARD_COUNT_ALLOCATOR_KEY.equals(type)) { - shardsAllocator = EvenShardsCountAllocator.class; + final ESLogger logger = Loggers.getLogger(getClass(), settings); + logger.warn("{} allocator has been removed in 2.0 using {} instead", EVEN_SHARD_COUNT_ALLOCATOR_KEY, BALANCED_ALLOCATOR_KEY); + shardsAllocator = BalancedShardsAllocator.class; } else { shardsAllocator = settings.getAsClass(TYPE_KEY, BalancedShardsAllocator.class, "org.elasticsearch.cluster.routing.allocation.allocator.", "Allocator"); diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java index 07e83bed51b..cecb6a4a498 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.allocation; import 
org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule; import org.elasticsearch.common.settings.ImmutableSettings; @@ -42,20 +41,20 @@ public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest { } public void testLoadByShortKeyShardsAllocator() throws IOException { - Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY) + Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "even_shard") // legacy just to make sure we don't barf .build(); - assertAllocatorInstance(build, EvenShardsCountAllocator.class); + assertAllocatorInstance(build, BalancedShardsAllocator.class); build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.BALANCED_ALLOCATOR_KEY).build(); assertAllocatorInstance(build, BalancedShardsAllocator.class); } public void testLoadByClassNameShardsAllocator() throws IOException { - Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "EvenShardsCount").build(); - assertAllocatorInstance(build, EvenShardsCountAllocator.class); + Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "BalancedShards").build(); + assertAllocatorInstance(build, BalancedShardsAllocator.class); build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, - "org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator").build(); - assertAllocatorInstance(build, EvenShardsCountAllocator.class); + "org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator").build(); + assertAllocatorInstance(build, BalancedShardsAllocator.class); } private void assertAllocatorInstance(Settings settings, Class clazz) 
throws IOException { From b87d360e79726813db90c7acecc965057a4a39ed Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Sun, 3 May 2015 23:58:39 +0200 Subject: [PATCH 20/21] Automatically thread client based action listeners Today, we rely on the user to set request listener threads to true when they are on the client side in order not to block the IO threads on heavy operations. This proves to be very trappy for users, and end up creating problems that are very hard to debug. Instead, we can do the right thing, and automatically thread listeners that are used from the client when the client is a node client or a transport client. This change also removes the ability to set request level listener threading, in the effort of simplifying the code path and reasoning around when something is threaded and when it is not. closes #10940 --- .../elasticsearch/action/ActionRequest.java | 21 ---- .../action/ActionRequestBuilder.java | 8 +- .../action/TransportActionNodeProxy.java | 32 +---- .../get/TransportGetFieldMappingsAction.java | 2 - .../mlt/TransportMoreLikeThisAction.java | 4 +- .../search/SearchScrollRequestBuilder.java | 8 -- .../AbstractListenableActionFuture.java | 40 ++----- .../support/HandledTransportAction.java | 2 - .../support/PlainListenableActionFuture.java | 4 +- .../support/ThreadedActionListener.java | 110 ++++++++++++++++++ .../action/support/TransportAction.java | 73 ------------ .../TransportMasterNodeOperationAction.java | 14 +-- ...nsportShardReplicationOperationAction.java | 2 - .../TransportShardSingleOperationAction.java | 2 - .../elasticsearch/client/node/NodeClient.java | 17 ++- .../client/node/NodeClusterAdminClient.java | 22 ++-- .../client/node/NodeIndicesAdminClient.java | 22 ++-- .../client/transport/TransportClient.java | 2 +- .../TransportClientNodesService.java | 37 +----- .../support/InternalTransportClient.java | 12 +- .../InternalTransportClusterAdminClient.java | 13 ++- .../InternalTransportIndicesAdminClient.java | 11 +- 
.../health/RestClusterHealthAction.java | 1 - .../node/info/RestNodesInfoAction.java | 2 - .../node/stats/RestNodesStatsAction.java | 1 - .../delete/RestDeleteRepositoryAction.java | 1 - .../put/RestPutRepositoryAction.java | 1 - .../verify/RestVerifyRepositoryAction.java | 1 - .../reroute/RestClusterRerouteAction.java | 1 - .../RestClusterGetSettingsAction.java | 1 - .../RestClusterUpdateSettingsAction.java | 1 - .../shards/RestClusterSearchShardsAction.java | 1 - .../create/RestCreateSnapshotAction.java | 1 - .../cluster/state/RestClusterStateAction.java | 1 - .../cluster/stats/RestClusterStatsAction.java | 1 - .../alias/RestIndicesAliasesAction.java | 1 - .../get/RestGetIndicesAliasesAction.java | 1 - .../indices/analyze/RestAnalyzeAction.java | 1 - .../clear/RestClearIndicesCacheAction.java | 1 - .../indices/close/RestCloseIndexAction.java | 1 - .../indices/create/RestCreateIndexAction.java | 1 - .../indices/delete/RestDeleteIndexAction.java | 1 - .../indices/RestIndicesExistsAction.java | 1 - .../exists/types/RestTypesExistsAction.java | 1 - .../admin/indices/flush/RestFlushAction.java | 1 - .../mapping/put/RestPutMappingAction.java | 1 - .../indices/open/RestOpenIndexAction.java | 1 - .../indices/optimize/RestOptimizeAction.java | 1 - .../indices/recovery/RestRecoveryAction.java | 1 - .../indices/refresh/RestRefreshAction.java | 1 - .../segments/RestIndicesSegmentsAction.java | 1 - .../settings/RestUpdateSettingsAction.java | 1 - .../indices/stats/RestIndicesStatsAction.java | 1 - .../delete/RestDeleteIndexTemplateAction.java | 1 - .../get/RestGetIndexTemplateAction.java | 2 - .../put/RestPutIndexTemplateAction.java | 1 - .../query/RestValidateQueryAction.java | 1 - .../warmer/delete/RestDeleteWarmerAction.java | 1 - .../warmer/put/RestPutWarmerAction.java | 1 - .../rest/action/bulk/RestBulkAction.java | 1 - .../rest/action/cat/RestRecoveryAction.java | 1 - .../rest/action/count/RestCountAction.java | 1 - .../rest/action/delete/RestDeleteAction.java | 1 - 
.../rest/action/exists/RestExistsAction.java | 1 - .../fieldstats/RestFieldStatsAction.java | 1 - .../rest/action/get/RestGetAction.java | 1 - .../rest/action/get/RestGetSourceAction.java | 1 - .../rest/action/get/RestHeadAction.java | 1 - .../rest/action/get/RestMultiGetAction.java | 1 - .../rest/action/index/RestIndexAction.java | 1 - .../action/mlt/RestMoreLikeThisAction.java | 2 - .../action/percolate/RestPercolateAction.java | 2 - .../script/RestPutIndexedScriptAction.java | 2 +- .../action/search/RestMultiSearchAction.java | 1 - .../rest/action/search/RestSearchAction.java | 1 - .../action/search/RestSearchScrollAction.java | 1 - .../action/suggest/RestSuggestAction.java | 1 - .../RestMultiTermVectorsAction.java | 1 - .../rest/action/update/RestUpdateAction.java | 1 - .../elasticsearch/river/RiversService.java | 9 +- .../action/ListenerActionTests.java | 75 ++++++++++++ .../TransportActionFilterChainTests.java | 4 +- .../client/AbstractClientHeadersTests.java | 6 +- .../client/node/NodeClientHeadersTests.java | 16 +-- .../transport/TransportClientRetryTests.java | 2 +- 85 files changed, 300 insertions(+), 332 deletions(-) create mode 100644 src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java create mode 100644 src/test/java/org/elasticsearch/action/ListenerActionTests.java diff --git a/src/main/java/org/elasticsearch/action/ActionRequest.java b/src/main/java/org/elasticsearch/action/ActionRequest.java index 98eb6f946bf..3cae6449117 100644 --- a/src/main/java/org/elasticsearch/action/ActionRequest.java +++ b/src/main/java/org/elasticsearch/action/ActionRequest.java @@ -30,8 +30,6 @@ import java.io.IOException; */ public abstract class ActionRequest extends TransportRequest { - private boolean listenerThreaded = false; - protected ActionRequest() { super(); } @@ -43,25 +41,6 @@ public abstract class ActionRequest extends TransportRe //this.listenerThreaded = request.listenerThreaded(); } - /** - * Should the response listener be executed 
on a thread or not. - *

    - *

    When not executing on a thread, it will either be executed on the calling thread, or - * on an expensive, IO based, thread. - */ - public final boolean listenerThreaded() { - return this.listenerThreaded; - } - - /** - * Sets if the response listener be executed on a thread or not. - */ - @SuppressWarnings("unchecked") - public final T listenerThreaded(boolean listenerThreaded) { - this.listenerThreaded = listenerThreaded; - return (T) this; - } - public abstract ActionRequestValidationException validate(); @Override diff --git a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index 4335a40e030..78b0886162e 100644 --- a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -48,12 +48,6 @@ public abstract class ActionRequestBuilder execute() { - PlainListenableActionFuture future = new PlainListenableActionFuture<>(request.listenerThreaded(), threadPool); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(threadPool); execute(future); return future; } diff --git a/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index b2410f95827..dccf9b0fd57 100644 --- a/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -24,20 +24,15 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; +import 
org.elasticsearch.transport.*; /** * A generic proxy that will execute the given action against a specific node. */ public class TransportActionNodeProxy extends AbstractComponent { - protected final TransportService transportService; - + private final TransportService transportService; private final GenericAction action; - private final TransportRequestOptions transportOptions; @Inject @@ -48,36 +43,17 @@ public class TransportActionNodeProxy listener) { + public void execute(final DiscoveryNode node, final Request request, final ActionListener listener) { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); return; } - transportService.sendRequest(node, action.name(), request, transportOptions, new BaseTransportResponseHandler() { + transportService.sendRequest(node, action.name(), request, transportOptions, new ActionListenerResponseHandler(listener) { @Override public Response newInstance() { return action.newResponse(); } - - @Override - public String executor() { - if (request.listenerThreaded()) { - return ThreadPool.Names.LISTENER; - } - return ThreadPool.Names.SAME; - } - - @Override - public void handleResponse(Response response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } }); } - } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index ee4a6d6f076..3c8d1b7affa 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -64,8 +64,6 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction() { @Override public void 
onResponse(GetFieldMappingsResponse result) { diff --git a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java index ab119e169f0..679ef533307 100644 --- a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java @@ -119,7 +119,6 @@ public class TransportMoreLikeThisAction extends HandledTransportAction() { @@ -197,8 +196,7 @@ public class TransportMoreLikeThisAction extends HandledTransportAction extends AdapterActionFuture implements ListenableActionFuture { - final boolean listenerThreaded; + private final static ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class); + final ThreadPool threadPool; volatile Object listeners; boolean executedListeners = false; - protected AbstractListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) { - this.listenerThreaded = listenerThreaded; + protected AbstractListenableActionFuture(ThreadPool threadPool) { this.threadPool = threadPool; } - public boolean listenerThreaded() { - return false; // we control execution of the listener - } - public ThreadPool threadPool() { return threadPool; } @@ -57,6 +53,7 @@ public abstract class AbstractListenableActionFuture extends AdapterAction } public void internalAddListener(ActionListener listener) { + listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); boolean executeImmediate = false; synchronized (this) { if (executedListeners) { @@ -101,27 +98,10 @@ public abstract class AbstractListenableActionFuture extends AdapterAction } private void executeListener(final ActionListener listener) { - if (listenerThreaded) { - try { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new Runnable() { - @Override - public void run() { - try { - listener.onResponse(actionGet()); - } catch (ElasticsearchException 
e) { - listener.onFailure(e); - } - } - }); - } catch (EsRejectedExecutionException e) { - listener.onFailure(e); - } - } else { - try { - listener.onResponse(actionGet()); - } catch (Throwable e) { - listener.onFailure(e); - } + try { + listener.onResponse(actionGet()); + } catch (Throwable e) { + listener.onFailure(e); } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index f939893a98e..e2e1072feb3 100644 --- a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -41,8 +41,6 @@ public abstract class HandledTransportAction() { @Override public void onResponse(Response response) { diff --git a/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java b/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java index 3d6cb28bced..1ec30606312 100644 --- a/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java +++ b/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java @@ -26,8 +26,8 @@ import org.elasticsearch.threadpool.ThreadPool; */ public class PlainListenableActionFuture extends AbstractListenableActionFuture { - public PlainListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) { - super(listenerThreaded, threadPool); + public PlainListenableActionFuture(ThreadPool threadPool) { + super(threadPool); } @Override diff --git a/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java new file mode 100644 index 00000000000..30011bcf571 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.Future; + +/** + * An action listener that wraps another action listener and threading its execution. + */ +public final class ThreadedActionListener implements ActionListener { + + /** + * Wrapper that can be used to automatically wrap a listener in a threaded listener if needed. + */ + public static class Wrapper { + + private final ESLogger logger; + private final ThreadPool threadPool; + + private final boolean threadedListener; + + public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) { + this.logger = logger; + this.threadPool = threadPool; + // Should the action listener be threaded or not by default. 
Action listeners are automatically threaded for client + // nodes and transport client in order to make sure client side code is not executed on IO threads. + this.threadedListener = DiscoveryNode.clientNode(settings) || TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING)); + } + + public ActionListener wrap(ActionListener listener) { + if (threadedListener == false) { + return listener; + } + // if its a future, the callback is very lightweight (flipping a bit) so no need to wrap it + if (listener instanceof Future) { + return listener; + } + return new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); + } + } + + private final ESLogger logger; + private final ThreadPool threadPool; + private final String executor; + private final ActionListener listener; + + public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener listener) { + this.logger = logger; + this.threadPool = threadPool; + this.executor = executor; + this.listener = listener; + } + + @Override + public void onResponse(final Response response) { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + listener.onResponse(response); + } + + @Override + public void onFailure(Throwable t) { + listener.onFailure(t); + } + }); + } + + @Override + public void onFailure(final Throwable e) { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + listener.onFailure(e); + } + + @Override + public void onFailure(Throwable t) { + logger.warn("failed to execute failure callback on [{}], failure [{}]", t, listener, e); + } + }); + } +} diff --git a/src/main/java/org/elasticsearch/action/support/TransportAction.java b/src/main/java/org/elasticsearch/action/support/TransportAction.java index 7d3f3564693..c1a9f6098f6 100644 --- 
a/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -19,12 +19,10 @@ package org.elasticsearch.action.support; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.*; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.atomic.AtomicInteger; @@ -49,21 +47,11 @@ public abstract class TransportAction execute(Request request) { PlainActionFuture future = newFuture(); - // since we don't have a listener, and we release a possible lock with the future - // there is no need to execute it under a listener thread - request.listenerThreaded(false); execute(request, future); return future; } public final void execute(Request request, ActionListener listener) { - if (forceThreadedListener()) { - request.listenerThreaded(true); - } - if (request.listenerThreaded()) { - listener = new ThreadedActionListener<>(threadPool, listener, logger); - } - ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); @@ -83,69 +71,8 @@ public abstract class TransportAction listener); - static final class ThreadedActionListener implements ActionListener { - - private final ThreadPool threadPool; - - private final ActionListener listener; - - private final ESLogger logger; - - ThreadedActionListener(ThreadPool threadPool, ActionListener listener, ESLogger logger) { - this.threadPool = threadPool; - this.listener = listener; - this.logger = logger; - } - - @Override - public void onResponse(final Response response) { - try { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new Runnable() { - @Override - public void 
run() { - try { - listener.onResponse(response); - } catch (Throwable e) { - listener.onFailure(e); - } - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run threaded action, execution rejected [{}] running on current thread", listener); - /* we don't care if that takes long since we are shutting down. But if we not respond somebody could wait - * for the response on the listener side which could be a remote machine so make sure we push it out there.*/ - try { - listener.onResponse(response); - } catch (Throwable e) { - listener.onFailure(e); - } - } - } - - @Override - public void onFailure(final Throwable e) { - try { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new Runnable() { - @Override - public void run() { - listener.onFailure(e); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run threaded action, execution rejected for listener [{}] running on current thread", listener); - /* we don't care if that takes long since we are shutting down (or queue capacity). 
But if we not respond somebody could wait - * for the response on the listener side which could be a remote machine so make sure we push it out there.*/ - listener.onFailure(e); - } - } - } - private static class RequestFilterChain implements ActionFilterChain { private final TransportAction action; diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java index 15e90c0784d..4ff18e68db0 100644 --- a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; @@ -75,14 +76,11 @@ public abstract class TransportMasterNodeOperationAction listener) { + protected void doExecute(final Request request, ActionListener listener) { + // TODO do we really need to wrap it in a listener? 
the handlers should be cheap + if ((listener instanceof ThreadedActionListener) == false) { + listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); + } innerExecute(request, listener, new ClusterStateObserver(clusterService, request.masterNodeTimeout(), logger), false); } diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index 0e488a602ff..efdc5ab0d4c 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -186,8 +186,6 @@ public abstract class TransportShardReplicationOperationAction { @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - // no need to have a threaded listener since we just send back a response - request.listenerThreaded(false); // if we have a local operation, execute it on a thread since we don't spawn request.operationThreaded(true); execute(request, new ActionListener() { diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java index 1c91a7753dd..db6260e6f85 100644 --- a/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java @@ -232,8 +232,6 @@ public abstract class TransportShardSingleOperationAction() { diff --git a/src/main/java/org/elasticsearch/client/node/NodeClient.java b/src/main/java/org/elasticsearch/client/node/NodeClient.java index ae85454ba50..0c7495e3412 100644 --- 
a/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -21,6 +21,8 @@ package org.elasticsearch.client.node; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; @@ -28,6 +30,8 @@ import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; @@ -38,6 +42,7 @@ import java.util.Map; */ public class NodeClient extends AbstractClient { + private final ESLogger logger; private final Settings settings; private final ThreadPool threadPool; @@ -46,9 +51,11 @@ public class NodeClient extends AbstractClient { private final ImmutableMap actions; private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; @Inject public NodeClient(Settings settings, ThreadPool threadPool, NodeAdminClient admin, Map actions, Headers headers) { + this.logger = Loggers.getLogger(getClass(), settings); this.settings = settings; this.threadPool = threadPool; this.admin = admin; @@ -60,6 +67,7 @@ public class NodeClient extends AbstractClient { } } this.actions = actionsBuilder.immutableMap(); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @Override @@ -84,16 +92,17 @@ public class NodeClient extends AbstractClient { @SuppressWarnings("unchecked") @Override - public > ActionFuture execute(Action action, Request request) 
{ - headers.applyTo(request); - TransportAction transportAction = actions.get((ClientAction)action); - return transportAction.execute(request); + public > ActionFuture execute(final Action action, final Request request) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + execute(action, request, actionFuture); + return actionFuture; } @SuppressWarnings("unchecked") @Override public > void execute(Action action, Request request, ActionListener listener) { headers.applyTo(request); + listener = threadedWrapper.wrap(listener); TransportAction transportAction = actions.get((ClientAction)action); transportAction.execute(request, listener); } diff --git a/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java index 41af331a93f..3e5144b5449 100644 --- a/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java @@ -22,12 +22,17 @@ package org.elasticsearch.client.node; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; import org.elasticsearch.action.admin.cluster.ClusterAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.support.AbstractClusterAdminClient; import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import java.util.Map; @@ -37,14 +42,15 @@ import java.util.Map; */ public class NodeClusterAdminClient extends 
AbstractClusterAdminClient implements ClusterAdminClient { + private final ESLogger logger; private final ThreadPool threadPool; - private final ImmutableMap actions; - private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; @Inject - public NodeClusterAdminClient(ThreadPool threadPool, Map actions, Headers headers) { + public NodeClusterAdminClient(Settings settings, ThreadPool threadPool, Map actions, Headers headers) { + this.logger = Loggers.getLogger(getClass(), settings); this.threadPool = threadPool; this.headers = headers; MapBuilder actionsBuilder = new MapBuilder<>(); @@ -54,6 +60,7 @@ public class NodeClusterAdminClient extends AbstractClusterAdminClient implement } } this.actions = actionsBuilder.immutableMap(); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @Override @@ -63,16 +70,17 @@ public class NodeClusterAdminClient extends AbstractClusterAdminClient implement @SuppressWarnings("unchecked") @Override - public > ActionFuture execute(Action action, Request request) { - headers.applyTo(request); - TransportAction transportAction = actions.get((ClusterAction)action); - return transportAction.execute(request); + public > ActionFuture execute(final Action action, final Request request) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + execute(action, request, actionFuture); + return actionFuture; } @SuppressWarnings("unchecked") @Override public > void execute(Action action, Request request, ActionListener listener) { headers.applyTo(request); + listener = threadedWrapper.wrap(listener); TransportAction transportAction = actions.get((ClusterAction)action); transportAction.execute(request, listener); } diff --git a/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java index c74d68d494f..6db38103f59 100644 --- 
a/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java +++ b/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java @@ -22,12 +22,17 @@ package org.elasticsearch.client.node; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; import org.elasticsearch.action.admin.indices.IndicesAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.client.support.AbstractIndicesAdminClient; import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import java.util.Map; @@ -37,14 +42,15 @@ import java.util.Map; */ public class NodeIndicesAdminClient extends AbstractIndicesAdminClient implements IndicesAdminClient { + private final ESLogger logger; private final ThreadPool threadPool; - private final ImmutableMap actions; - private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; @Inject - public NodeIndicesAdminClient(ThreadPool threadPool, Map actions, Headers headers) { + public NodeIndicesAdminClient(Settings settings, ThreadPool threadPool, Map actions, Headers headers) { + this.logger = Loggers.getLogger(getClass(), settings); this.threadPool = threadPool; this.headers = headers; MapBuilder actionsBuilder = new MapBuilder<>(); @@ -54,6 +60,7 @@ public class NodeIndicesAdminClient extends AbstractIndicesAdminClient implement } } this.actions = actionsBuilder.immutableMap(); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @Override @@ 
-63,16 +70,17 @@ public class NodeIndicesAdminClient extends AbstractIndicesAdminClient implement @SuppressWarnings("unchecked") @Override - public > ActionFuture execute(Action action, Request request) { - headers.applyTo(request); - TransportAction transportAction = actions.get((IndicesAction)action); - return transportAction.execute(request); + public > ActionFuture execute(final Action action, final Request request) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + execute(action, request, actionFuture); + return actionFuture; } @SuppressWarnings("unchecked") @Override public > void execute(Action action, Request request, ActionListener listener) { headers.applyTo(request); + listener = threadedWrapper.wrap(listener); TransportAction transportAction = actions.get((IndicesAction)action); transportAction.execute(request, listener); } diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 6dd30b02af3..2d62100ef0d 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -93,7 +93,7 @@ import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilde */ public class TransportClient extends AbstractClient { - private static final String CLIENT_TYPE = "transport"; + public static final String CLIENT_TYPE = "transport"; final Injector injector; diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index e2cf962f65a..44d6e0d7851 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import 
com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -38,11 +37,9 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -199,7 +196,7 @@ public class TransportClientNodesService extends AbstractComponent { ImmutableList nodes = this.nodes; ensureNodesAreAvailable(nodes); int index = getNodeNumber(); - RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index, threadPool, logger); + RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index); DiscoveryNode node = nodes.get((index) % nodes.size()); try { callback.doWithNode(node, retryListener); @@ -213,20 +210,15 @@ public class TransportClientNodesService extends AbstractComponent { private final NodeListenerCallback callback; private final ActionListener listener; private final ImmutableList nodes; - private final ESLogger logger; private final int index; - private ThreadPool threadPool; private volatile int i; - public RetryListener(NodeListenerCallback callback, ActionListener listener, ImmutableList nodes, - int index, ThreadPool threadPool, ESLogger logger) { + public RetryListener(NodeListenerCallback callback, 
ActionListener listener, ImmutableList nodes, int index) { this.callback = callback; this.listener = listener; this.nodes = nodes; this.index = index; - this.threadPool = threadPool; - this.logger = logger; } @Override @@ -239,38 +231,21 @@ public class TransportClientNodesService extends AbstractComponent { if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) { int i = ++this.i; if (i >= nodes.size()) { - runFailureInListenerThreadPool(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); + listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); } else { try { callback.doWithNode(nodes.get((index + i) % nodes.size()), this); } catch(final Throwable t) { // this exception can't come from the TransportService as it doesn't throw exceptions at all - runFailureInListenerThreadPool(t); + listener.onFailure(t); } } } else { - runFailureInListenerThreadPool(e); + listener.onFailure(e); } } - // need to ensure to not block the netty I/O thread, in case of retry due to the node sampling - private void runFailureInListenerThreadPool(final Throwable t) { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - listener.onFailure(t); - } - @Override - public void onFailure(Throwable t) { - if (logger.isDebugEnabled()) { - logger.debug("Could not execute failure listener: [{}]", t, t.getMessage()); - } else { - logger.error("Could not execute failure listener: [{}]", t.getMessage()); - } - } - }); - } } public void close() { @@ -505,7 +480,7 @@ public class TransportClientNodesService extends AbstractComponent { } } - public static interface NodeListenerCallback { + public interface NodeListenerCallback { void doWithNode(DiscoveryNode node, ActionListener listener); } diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java 
b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java index 11a9959019f..0aff540cb9e 100644 --- a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java @@ -22,6 +22,7 @@ package org.elasticsearch.client.transport.support; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.support.AbstractClient; @@ -30,6 +31,8 @@ import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -41,21 +44,20 @@ import java.util.Map; */ public class InternalTransportClient extends AbstractClient { + private final ESLogger logger; private final Settings settings; private final ThreadPool threadPool; - private final TransportClientNodesService nodesService; - private final InternalTransportAdminClient adminClient; - private final ImmutableMap actions; - private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; @Inject public InternalTransportClient(Settings settings, ThreadPool threadPool, TransportService transportService, TransportClientNodesService nodesService, InternalTransportAdminClient adminClient, Map actions, Headers headers) { + this.logger = Loggers.getLogger(getClass(), settings); this.settings = settings; 
this.threadPool = threadPool; this.nodesService = nodesService; @@ -68,6 +70,7 @@ public class InternalTransportClient extends AbstractClient { } } this.actions = actionsBuilder.immutableMap(); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @Override @@ -102,6 +105,7 @@ public class InternalTransportClient extends AbstractClient { @Override public > void execute(final Action action, final Request request, ActionListener listener) { headers.applyTo(request); + listener = threadedWrapper.wrap(listener); final TransportActionNodeProxy proxy = actions.get(action); nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { @Override diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java index 74be67d0009..5ede20849a3 100644 --- a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; import org.elasticsearch.action.admin.cluster.ClusterAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.support.AbstractClusterAdminClient; import org.elasticsearch.client.support.Headers; @@ -30,6 +31,8 @@ import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -42,17 +45,17 @@ import java.util.Map; @SuppressWarnings("unchecked") public class InternalTransportClusterAdminClient extends AbstractClusterAdminClient implements ClusterAdminClient { + private final ESLogger logger; private final TransportClientNodesService nodesService; - private final ThreadPool threadPool; - private final ImmutableMap actions; - private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; @Inject public InternalTransportClusterAdminClient(Settings settings, TransportClientNodesService nodesService, ThreadPool threadPool, TransportService transportService, Map actions, Headers headers) { + this.logger = Loggers.getLogger(getClass(), settings); this.nodesService = nodesService; this.threadPool = threadPool; this.headers = headers; @@ -63,6 +66,7 @@ public class InternalTransportClusterAdminClient extends AbstractClusterAdminCli } } this.actions = actionsBuilder.immutableMap(); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @Override @@ -80,8 +84,9 @@ public class InternalTransportClusterAdminClient extends AbstractClusterAdminCli @SuppressWarnings("unchecked") @Override - public > void execute(final Action action, final Request request, final ActionListener listener) { + public > void execute(final Action action, final Request request, ActionListener listener) { headers.applyTo(request); + listener = threadedWrapper.wrap(listener); final TransportActionNodeProxy proxy = actions.get(action); nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { @Override diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java index 8cb63a17c7d..49295b1c968 100644 
--- a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java +++ b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; import org.elasticsearch.action.admin.indices.IndicesAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.client.support.AbstractIndicesAdminClient; import org.elasticsearch.client.support.Headers; @@ -30,6 +31,8 @@ import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -42,17 +45,17 @@ import java.util.Map; @SuppressWarnings("unchecked") public class InternalTransportIndicesAdminClient extends AbstractIndicesAdminClient implements IndicesAdminClient { + private final ESLogger logger; private final TransportClientNodesService nodesService; - private final ThreadPool threadPool; - private final ImmutableMap actions; - private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; @Inject public InternalTransportIndicesAdminClient(Settings settings, TransportClientNodesService nodesService, TransportService transportService, ThreadPool threadPool, Map actions, Headers headers) { + this.logger = Loggers.getLogger(getClass(), settings); this.nodesService = nodesService; this.threadPool = threadPool; this.headers = headers; @@ -63,6 +66,7 @@ public class 
InternalTransportIndicesAdminClient extends AbstractIndicesAdminCli } } this.actions = actionsBuilder.immutableMap(); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @Override @@ -82,6 +86,7 @@ public class InternalTransportIndicesAdminClient extends AbstractIndicesAdminCli @Override public > void execute(final Action action, final Request request, ActionListener listener) { headers.applyTo(request); + listener = threadedWrapper.wrap(listener); final TransportActionNodeProxy proxy = actions.get(action); nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java index b080e51055d..dfcb4438d57 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java @@ -50,7 +50,6 @@ public class RestClusterHealthAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index"))); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); - clusterHealthRequest.listenerThreaded(false); clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); String waitForStatus = request.param("wait_for_status"); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java 
b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java index dbda82ff387..a78c90aca63 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java @@ -81,8 +81,6 @@ public class RestNodesInfoAction extends BaseRestHandler { } final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds); - nodesInfoRequest.listenerThreaded(false); - // shortcut, dont do checks if only all is specified if (metrics.size() == 1 && metrics.contains("_all")) { nodesInfoRequest.all(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index 4704d8ee832..d5bb383c33a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -60,7 +60,6 @@ public class RestNodesStatsAction extends BaseRestHandler { Set metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all")); NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds); - nodesStatsRequest.listenerThreaded(false); if (metrics.size() == 1 && metrics.contains("_all")) { nodesStatsRequest.all(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java index 46c06ecbe75..758ee34505a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java @@ -45,7 +45,6 @@ public class 
RestDeleteRepositoryAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository")); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); - deleteRepositoryRequest.listenerThreaded(false); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); client.admin().cluster().deleteRepository(deleteRepositoryRequest, new AcknowledgedRestListener(channel)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java index 60e68b25f42..b974a9be0fb 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java @@ -47,7 +47,6 @@ public class RestPutRepositoryAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutRepositoryRequest putRepositoryRequest = putRepositoryRequest(request.param("repository")); - putRepositoryRequest.listenerThreaded(false); putRepositoryRequest.source(request.content().toUtf8()); putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java 
b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java index cdfe3b7992b..bbc39cbd2f3 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java @@ -50,7 +50,6 @@ public class RestVerifyRepositoryAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { VerifyRepositoryRequest verifyRepositoryRequest = verifyRepositoryRequest(request.param("repository")); - verifyRepositoryRequest.listenerThreaded(false); verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); verifyRepositoryRequest.timeout(request.paramAsTime("timeout", verifyRepositoryRequest.timeout())); client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener(channel)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java index 489acf93db1..7d5d2c9d5ff 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java @@ -54,7 +54,6 @@ public class RestClusterRerouteAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { final ClusterRerouteRequest clusterRerouteRequest = Requests.clusterRerouteRequest(); - clusterRerouteRequest.listenerThreaded(false); clusterRerouteRequest.dryRun(request.paramAsBoolean("dry_run", clusterRerouteRequest.dryRun())); 
clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain())); clusterRerouteRequest.timeout(request.paramAsTime("timeout", clusterRerouteRequest.timeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index 5ef9c403ac2..a1cfdb48ddb 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -42,7 +42,6 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() - .listenerThreaded(false) .routingTable(false) .nodes(false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java index 87363d386ef..8536c037e89 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java @@ -46,7 +46,6 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest(); - clusterUpdateSettingsRequest.listenerThreaded(false); 
clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout())); clusterUpdateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout())); Map source = XContentFactory.xContent(request.content()).createParser(request.content()).mapAndClose(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java index 22151e7b0ad..a797a474eb6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java @@ -53,7 +53,6 @@ public class RestClusterSearchShardsAction extends BaseRestHandler { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterSearchShardsRequest clusterSearchShardsRequest = Requests.clusterSearchShardsRequest(indices); clusterSearchShardsRequest.local(request.paramAsBoolean("local", clusterSearchShardsRequest.local())); - clusterSearchShardsRequest.listenerThreaded(false); clusterSearchShardsRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); clusterSearchShardsRequest.routing(request.param("routing")); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java index ff71f7e60f9..c62be2b3db6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java @@ -46,7 +46,6 @@ public class RestCreateSnapshotAction extends BaseRestHandler { @Override public void 
handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CreateSnapshotRequest createSnapshotRequest = createSnapshotRequest(request.param("repository"), request.param("snapshot")); - createSnapshotRequest.listenerThreaded(false); createSnapshotRequest.source(request.content().toUtf8()); createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java index bac21dd13e4..4e4dc0826ae 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java @@ -57,7 +57,6 @@ public class RestClusterStateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); - clusterStateRequest.listenerThreaded(false); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java index 5dd1c638b83..572a48de633 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java +++ 
b/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java @@ -43,7 +43,6 @@ public class RestClusterStatsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); - clusterStatsRequest.listenerThreaded(false); client.admin().cluster().clusterStats(clusterStatsRequest, new RestToXContentListener(channel)); } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java index 2019b71426a..4841500cb66 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java @@ -49,7 +49,6 @@ public class RestIndicesAliasesAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.listenerThreaded(false); indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) { // { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java index 060ec3f3012..34cdcb27962 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java +++ 
b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java @@ -61,7 +61,6 @@ public class RestGetIndicesAliasesAction extends BaseRestHandler { .nodes(false) .indices(indices); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.listenerThreaded(false); client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 4a5e47b9664..5ce5eaef4ac 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -59,7 +59,6 @@ public class RestAnalyzeAction extends BaseRestHandler { AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); analyzeRequest.text(text); - analyzeRequest.listenerThreaded(false); analyzeRequest.preferLocal(request.paramAsBoolean("prefer_local", analyzeRequest.preferLocalShard())); analyzeRequest.analyzer(request.param("analyzer")); analyzeRequest.field(request.param("field")); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 54d9948537e..8eb83cba0d1 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -56,7 +56,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClearIndicesCacheRequest 
clearIndicesCacheRequest = new ClearIndicesCacheRequest(Strings.splitStringByCommaToArray(request.param("index"))); - clearIndicesCacheRequest.listenerThreaded(false); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest); client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java index 03b5a8c542f..940b6c1cefa 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java @@ -44,7 +44,6 @@ public class RestCloseIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - closeIndexRequest.listenerThreaded(false); closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout())); closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java index 2fca3ff6a27..8f3447ff9f8 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java @@ -43,7 +43,6 @@ public class 
RestCreateIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index")); - createIndexRequest.listenerThreaded(false); if (request.hasContent()) { createIndexRequest.source(request.content()); } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java index 126c471578e..43201592e31 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java @@ -44,7 +44,6 @@ public class RestDeleteIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - deleteIndexRequest.listenerThreaded(false); deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout())); deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java index a0cf436a468..8ea4e633bc1 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java @@ -49,7 +49,6 @@ public class 
RestIndicesExistsAction extends BaseRestHandler { IndicesExistsRequest indicesExistsRequest = new IndicesExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); indicesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesExistsRequest.indicesOptions())); indicesExistsRequest.local(request.paramAsBoolean("local", indicesExistsRequest.local())); - indicesExistsRequest.listenerThreaded(false); client.admin().indices().exists(indicesExistsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(IndicesExistsResponse response) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java index c5571313a2f..a03a7f0fe1d 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java @@ -48,7 +48,6 @@ public class RestTypesExistsAction extends BaseRestHandler { TypesExistsRequest typesExistsRequest = new TypesExistsRequest( Strings.splitStringByCommaToArray(request.param("index")), Strings.splitStringByCommaToArray(request.param("type")) ); - typesExistsRequest.listenerThreaded(false); typesExistsRequest.local(request.paramAsBoolean("local", typesExistsRequest.local())); typesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, typesExistsRequest.indicesOptions())); client.admin().indices().typesExists(typesExistsRequest, new RestResponseListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java index 5bcb775122a..6c95342cf89 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java +++ 
b/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java @@ -53,7 +53,6 @@ public class RestFlushAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); - flushRequest.listenerThreaded(false); flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java index d067ed96bd1..6df8edc0c82 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java @@ -67,7 +67,6 @@ public class RestPutMappingAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); - putMappingRequest.listenerThreaded(false); putMappingRequest.type(request.param("type")); putMappingRequest.source(request.content().toUtf8()); putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java index 7ff9a8fb620..e81bca30f6a 100644 --- 
a/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java @@ -44,7 +44,6 @@ public class RestOpenIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - openIndexRequest.listenerThreaded(false); openIndexRequest.timeout(request.paramAsTime("timeout", openIndexRequest.timeout())); openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout())); openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java index c49745db629..74379f632c5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java @@ -53,7 +53,6 @@ public class RestOptimizeAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { OptimizeRequest optimizeRequest = new OptimizeRequest(Strings.splitStringByCommaToArray(request.param("index"))); - optimizeRequest.listenerThreaded(false); optimizeRequest.indicesOptions(IndicesOptions.fromRequest(request, optimizeRequest.indicesOptions())); optimizeRequest.maxNumSegments(request.paramAsInt("max_num_segments", optimizeRequest.maxNumSegments())); optimizeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", optimizeRequest.onlyExpungeDeletes())); diff --git 
a/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java index 2abd624a3c4..9d470c4b051 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java @@ -51,7 +51,6 @@ public class RestRecoveryAction extends BaseRestHandler { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); - recoveryRequest.listenerThreaded(false); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java index aaf8933ea40..949b82270ff 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java @@ -53,7 +53,6 @@ public class RestRefreshAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); - refreshRequest.listenerThreaded(false); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { @Override diff --git 
a/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java index 3806d8cbe1c..b5b2ba6e7c4 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java @@ -49,7 +49,6 @@ public class RestIndicesSegmentsAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest(Strings.splitStringByCommaToArray(request.param("index"))); indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false)); - indicesSegmentsRequest.listenerThreaded(false); indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index d19b5c9a1f4..718d16c4705 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -55,7 +55,6 @@ public class RestUpdateSettingsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index"))); - updateSettingsRequest.listenerThreaded(false); updateSettingsRequest.timeout(request.paramAsTime("timeout", 
updateSettingsRequest.timeout())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java index 52ac511366d..43fcbd57171 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java @@ -53,7 +53,6 @@ public class RestIndicesStatsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); - indicesStatsRequest.listenerThreaded(false); indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesStatsRequest.indicesOptions())); indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); indicesStatsRequest.types(Strings.splitStringByCommaToArray(request.param("types"))); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java index 8ebb8675d76..2b6ebbc6023 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java @@ -40,7 +40,6 @@ public class RestDeleteIndexTemplateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) 
{ DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); - deleteIndexTemplateRequest.listenerThreaded(false); deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestListener(channel)); } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java index dac342fab95..a4c2539f226 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java @@ -58,8 +58,6 @@ public class RestGetIndexTemplateAction extends BaseRestHandler { getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); - getIndexTemplatesRequest.listenerThreaded(false); - final boolean implicitAll = getIndexTemplatesRequest.names().length == 0; client.admin().indices().getTemplates(getIndexTemplatesRequest, new RestBuilderListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java index 7875f12259b..e555cfd0fac 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java @@ -42,7 +42,6 @@ public class RestPutIndexTemplateAction extends BaseRestHandler { @Override 
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name")); - putRequest.listenerThreaded(false); putRequest.template(request.param("template", putRequest.template())); putRequest.order(request.paramAsInt("order", putRequest.order())); putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java index 32a2d24e888..4f237465bd6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java @@ -57,7 +57,6 @@ public class RestValidateQueryAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); - validateQueryRequest.listenerThreaded(false); validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions())); if (RestActions.hasBodyContent(request)) { validateQueryRequest.source(RestActions.getRestContent(request)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java index da41e80cc97..1d3fae87616 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java 
@@ -47,7 +47,6 @@ public class RestDeleteWarmerAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name"))) .indices(Strings.splitStringByCommaToArray(request.param("index"))); - deleteWarmerRequest.listenerThreaded(false); deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout())); deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout())); deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java index 9a802f1bf3d..62f666364f5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java @@ -59,7 +59,6 @@ public class RestPutWarmerAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name")); - putWarmerRequest.listenerThreaded(false); SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index"))) .types(Strings.splitStringByCommaToArray(request.param("type"))) .queryCache(request.paramAsBoolean("query_cache", null)) diff --git a/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java index 9578e078270..1a3a1b38a6e 100644 --- a/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java +++ 
b/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java @@ -71,7 +71,6 @@ public class RestBulkAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { BulkRequest bulkRequest = Requests.bulkRequest(); - bulkRequest.listenerThreaded(false); String defaultIndex = request.param("index"); String defaultType = request.param("type"); String defaultRouting = request.param("routing"); diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index a7a7eb53e85..e2d277819e0 100644 --- a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -68,7 +68,6 @@ public class RestRecoveryAction extends AbstractCatAction { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); - recoveryRequest.listenerThreaded(false); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); client.admin().indices().recoveries(recoveryRequest, new RestResponseListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java index 58dcfa50973..dc38db49181 100644 --- a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -58,7 +58,6 @@ public class RestCountAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CountRequest countRequest = new 
CountRequest(Strings.splitStringByCommaToArray(request.param("index"))); countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions())); - countRequest.listenerThreaded(false); if (RestActions.hasBodyContent(request)) { countRequest.source(RestActions.getRestContent(request)); } else { diff --git a/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index fced1421cb0..69f06cef1f1 100644 --- a/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -51,7 +51,6 @@ public class RestDeleteAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id")); - deleteRequest.listenerThreaded(false); deleteRequest.operationThreaded(true); deleteRequest.routing(request.param("routing")); diff --git a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java index 85f73f61ec4..7cfe7caf3fd 100644 --- a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java @@ -48,7 +48,6 @@ public class RestExistsAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final ExistsRequest existsRequest = new ExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); existsRequest.indicesOptions(IndicesOptions.fromRequest(request, existsRequest.indicesOptions())); - existsRequest.listenerThreaded(false); if (RestActions.hasBodyContent(request)) { existsRequest.source(RestActions.getRestContent(request)); } else { 
diff --git a/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java index fd45c5a56d4..ca382f3c642 100644 --- a/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java @@ -57,7 +57,6 @@ public class RestFieldStatsAction extends BaseRestHandler { fieldStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, fieldStatsRequest.indicesOptions())); fieldStatsRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); fieldStatsRequest.level(request.param("level", FieldStatsRequest.DEFAULT_LEVEL)); - fieldStatsRequest.listenerThreaded(false); client.fieldStats(fieldStatsRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java index ae2e76be690..9ed5c4d5fe9 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java @@ -50,7 +50,6 @@ public class RestGetAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - getRequest.listenerThreaded(false); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java index 4142e667985..db3954ec5e8 100644 --- 
a/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java @@ -51,7 +51,6 @@ public class RestGetSourceAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - getRequest.listenerThreaded(false); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java index 9217200a7e6..d0c1433bb47 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java @@ -47,7 +47,6 @@ public class RestHeadAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - getRequest.listenerThreaded(false); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java index b9cdd78063c..14e4496085b 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java @@ -53,7 +53,6 
@@ public class RestMultiGetAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); - multiGetRequest.listenerThreaded(false); multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh())); multiGetRequest.preference(request.param("preference")); multiGetRequest.realtime(request.paramAsBoolean("realtime", null)); diff --git a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index d95ef3e9498..a0d5b279e71 100644 --- a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -70,7 +70,6 @@ public class RestIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id")); - indexRequest.listenerThreaded(false); indexRequest.operationThreaded(true); indexRequest.routing(request.param("routing")); indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java index a9d60e02aa5..41f28574bdf 100644 --- a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java @@ -50,8 +50,6 @@ public class RestMoreLikeThisAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { MoreLikeThisRequest mltRequest = 
moreLikeThisRequest(request.param("index")).type(request.param("type")).id(request.param("id")); mltRequest.routing(request.param("routing")); - - mltRequest.listenerThreaded(false); //TODO the ParseField class that encapsulates the supported names used for an attribute //needs some work if it is to be used in a REST context like this too // See the MoreLikeThisQueryParser constants that hold the valid syntax diff --git a/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java b/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java index fb609d3e14a..4ee543f5362 100644 --- a/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java @@ -94,8 +94,6 @@ public class RestPercolateAction extends BaseRestHandler { } void executePercolate(final PercolateRequest percolateRequest, final RestChannel restChannel, final Client client) { - // we just send a response, no need to fork - percolateRequest.listenerThreaded(false); client.percolate(percolateRequest, new RestToXContentListener(restChannel)); } diff --git a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java index d63a39ac555..35e3f2cc473 100644 --- a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java @@ -75,7 +75,7 @@ public class RestPutIndexedScriptAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, Client client) { - PutIndexedScriptRequest putRequest = new PutIndexedScriptRequest(getScriptLang(request), request.param("id")).listenerThreaded(false); + PutIndexedScriptRequest putRequest = new PutIndexedScriptRequest(getScriptLang(request), request.param("id")); 
putRequest.version(request.paramAsLong("version", putRequest.version())); putRequest.versionType(VersionType.fromString(request.param("version_type"), putRequest.versionType())); putRequest.source(request.content()); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 92c0ba9b217..6dfe605d96b 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -56,7 +56,6 @@ public class RestMultiSearchAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); - multiSearchRequest.listenerThreaded(false); String[] indices = Strings.splitStringByCommaToArray(request.param("index")); String[] types = Strings.splitStringByCommaToArray(request.param("type")); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 9c3e85a2e94..70060588ded 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -77,7 +77,6 @@ public class RestSearchAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { SearchRequest searchRequest; searchRequest = RestSearchAction.parseSearchRequest(request); - searchRequest.listenerThreaded(false); client.search(searchRequest, new RestStatusToXContentListener(channel)); } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index 
15de56265bc..c53331bb496 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -60,7 +60,6 @@ public class RestSearchScrollAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { String scrollId = request.param("scroll_id"); SearchScrollRequest searchScrollRequest = new SearchScrollRequest(); - searchScrollRequest.listenerThreaded(false); searchScrollRequest.scrollId(scrollId); String scroll = request.param("scroll"); if (scroll != null) { diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index 9f9c3946b36..184a62244d1 100644 --- a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -59,7 +59,6 @@ public class RestSuggestAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { SuggestRequest suggestRequest = new SuggestRequest(Strings.splitStringByCommaToArray(request.param("index"))); suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions())); - suggestRequest.listenerThreaded(false); if (RestActions.hasBodyContent(request)) { suggestRequest.suggest(RestActions.getRestContent(request)); } else { diff --git a/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java b/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java index b0cd0bd63a7..400869fff0e 100644 --- a/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java @@ -49,7 +49,6 @@ 
public class RestMultiTermVectorsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest(); - multiTermVectorsRequest.listenerThreaded(false); TermVectorsRequest template = new TermVectorsRequest(); template.index(request.param("index")); template.type(request.param("type")); diff --git a/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index c884fe4267e..d019e598cac 100644 --- a/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -55,7 +55,6 @@ public class RestUpdateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id")); - updateRequest.listenerThreaded(false); updateRequest.routing(request.param("routing")); updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); diff --git a/src/main/java/org/elasticsearch/river/RiversService.java b/src/main/java/org/elasticsearch/river/RiversService.java index ed7369d8ad0..fdb2589a540 100644 --- a/src/main/java/org/elasticsearch/river/RiversService.java +++ b/src/main/java/org/elasticsearch/river/RiversService.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.Client; import 
org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -231,7 +232,7 @@ public class RiversService extends AbstractLifecycleComponent { logger.trace("river {} is already allocated", routing.riverName().getName()); continue; } - prepareGetMetaDocument(routing.riverName().name()).execute(new ActionListener() { + prepareGetMetaDocument(routing.riverName().name()).execute(new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, new ActionListener() { @Override public void onResponse(GetResponse getResponse) { if (!rivers.containsKey(routing.riverName())) { @@ -255,7 +256,7 @@ public class RiversService extends AbstractLifecycleComponent { logger.debug("failed to get _meta from [{}]/[{}], retrying...", e, routing.riverName().type(), routing.riverName().name()); final ActionListener listener = this; try { - threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.SAME, new Runnable() { + threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.LISTENER, new Runnable() { @Override public void run() { prepareGetMetaDocument(routing.riverName().name()).execute(listener); @@ -268,12 +269,12 @@ public class RiversService extends AbstractLifecycleComponent { logger.warn("failed to get _meta from [{}]/[{}]", e, routing.riverName().type(), routing.riverName().name()); } } - }); + })); } } private GetRequestBuilder prepareGetMetaDocument(String riverName) { - return client.prepareGet(riverIndexName, riverName, "_meta").setPreference("_primary").setListenerThreaded(true); + return client.prepareGet(riverIndexName, riverName, "_meta").setPreference("_primary"); } } } diff --git a/src/test/java/org/elasticsearch/action/ListenerActionTests.java b/src/test/java/org/elasticsearch/action/ListenerActionTests.java new file mode 100644 index 00000000000..50cde99fcdb --- /dev/null +++ b/src/test/java/org/elasticsearch/action/ListenerActionTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under 
one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +/** + */ +public class ListenerActionTests extends ElasticsearchIntegrationTest { + + @Test + public void verifyThreadedListeners() throws Throwable { + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference failure = new AtomicReference<>(); + final AtomicReference threadName = new AtomicReference<>(); + Client client = client(); + + IndexRequest request = new IndexRequest("test", "type", "1"); + if (randomBoolean()) { + // set the source, without it, we will have a verification failure + request.source("field1", "value1"); + } + + client.index(request, new ActionListener() { + @Override + public void onResponse(IndexResponse indexResponse) { + threadName.set(Thread.currentThread().getName()); + latch.countDown(); 
+ } + + @Override + public void onFailure(Throwable e) { + threadName.set(Thread.currentThread().getName()); + failure.set(e); + latch.countDown(); + } + }); + + latch.await(); + + boolean shouldBeThreaded = DiscoveryNode.clientNode(client.settings()) || TransportClient.CLIENT_TYPE.equals(client.settings().get(Client.CLIENT_TYPE_SETTING)); + if (shouldBeThreaded) { + assertTrue(threadName.get().contains("listener")); + } else { + assertFalse(threadName.get().contains("listener")); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 1eccf3521f0..e32eb6cdb7c 100644 --- a/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -92,7 +92,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { } } - PlainListenableActionFuture future = new PlainListenableActionFuture<>(false, null); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(null); transportAction.execute(new TestRequest(), future); try { assertThat(future.get(), notNullValue()); @@ -174,7 +174,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { } } - PlainListenableActionFuture future = new PlainListenableActionFuture<>(false, null); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(null); transportAction.execute(new TestRequest(), future); try { assertThat(future.get(), notNullValue()); diff --git a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java index 05561a9dec0..e4484ef3177 100644 --- a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java @@ -51,6 +51,7 @@ 
import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportMessage; import org.junit.After; import org.junit.Before; @@ -84,16 +85,19 @@ public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase { CreateIndexAction.INSTANCE, IndicesStatsAction.INSTANCE, ClearIndicesCacheAction.INSTANCE, FlushAction.INSTANCE }; + protected ThreadPool threadPool; private Client client; @Before public void initClient() { + threadPool = new ThreadPool("test-" + getTestName()); client = buildClient(HEADER_SETTINGS, ACTIONS); } @After - public void cleanupClient() { + public void cleanupClient() throws Exception { client.close(); + terminate(threadPool); } protected abstract Client buildClient(Settings headersSettings, GenericAction[] testedActions); diff --git a/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index 1bf3bf5be32..ce4cfa43439 100644 --- a/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -42,18 +42,6 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTests { private static final ActionFilters EMPTY_FILTERS = new ActionFilters(ImmutableSet.of()); - private ThreadPool threadPool; - - @Before - public void init() { - threadPool = new ThreadPool("test"); - } - - @After - public void cleanup() throws InterruptedException { - terminate(threadPool); - } - @Override protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) { Settings settings = HEADER_SETTINGS; @@ -61,8 +49,8 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTests { Headers headers = new 
Headers(settings); Actions actions = new Actions(settings, threadPool, testedActions); - NodeClusterAdminClient clusterClient = new NodeClusterAdminClient(threadPool, actions, headers); - NodeIndicesAdminClient indicesClient = new NodeIndicesAdminClient(threadPool, actions, headers); + NodeClusterAdminClient clusterClient = new NodeClusterAdminClient(settings, threadPool, actions, headers); + NodeIndicesAdminClient indicesClient = new NodeIndicesAdminClient(settings, threadPool, actions, headers); NodeAdminClient adminClient = new NodeAdminClient(settings, clusterClient, indicesClient); return new NodeClient(settings, threadPool, adminClient, actions, headers); } diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java index c1d11bb2312..17e3c0c5d87 100644 --- a/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java +++ b/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java @@ -84,7 +84,7 @@ public class TransportClientRetryTests extends ElasticsearchIntegrationTest { if (randomBoolean()) { clusterState = transportClient.admin().cluster().state(clusterStateRequest).get().getState(); } else { - PlainListenableActionFuture future = new PlainListenableActionFuture<>(clusterStateRequest.listenerThreaded(), transportClient.threadPool()); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(transportClient.threadPool()); transportClient.admin().cluster().state(clusterStateRequest, future); clusterState = future.get().getState(); } From 70ae862fe45c88c1e9ae894554af9176f00f3424 Mon Sep 17 00:00:00 2001 From: Mikael Mattsson Date: Fri, 1 May 2015 16:58:04 +0200 Subject: [PATCH 21/21] Docs: Add Elasticsearch Indexer for WordPress to integrations.asciidoc Closes #10921 --- docs/community/integrations.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/docs/community/integrations.asciidoc b/docs/community/integrations.asciidoc index d488e324313..bb2ff6850a0 100644 --- a/docs/community/integrations.asciidoc +++ b/docs/community/integrations.asciidoc @@ -59,6 +59,9 @@ * http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]: Elasticsearch WordPress Plugin + +* https://github.com/wallmanderco/elasticsearch-indexer[Elasticsearch Indexer]: + Elasticsearch WordPress Plugin * https://github.com/OlegKunitsyn/eslogd[eslogd]: Linux daemon that replicates events to a central Elasticsearch server in real-time