Merge branch 'master' into pr-10624

Conflicts:
	src/main/java/org/elasticsearch/index/engine/EngineConfig.java
	src/main/java/org/elasticsearch/index/shard/IndexShard.java
	src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
	src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
Simon Willnauer 2015-05-04 11:37:54 +02:00
commit 7e5f9d5628
244 changed files with 3068 additions and 3892 deletions

@@ -1,368 +1,99 @@
-##################### Elasticsearch Configuration Example #####################
-# This file contains an overview of various configuration settings,
-# targeted at operations staff. Application developers should
-# consult the guide at <http://elasticsearch.org/guide>.
-#
-# The installation procedure is covered at
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
-#
-# Elasticsearch comes with reasonable defaults for most settings,
-# so you can try it out without bothering with configuration.
-#
-# Most of the time, these defaults are just fine for running a production
-# cluster. If you're fine-tuning your cluster, or wondering about the
-# effect of certain configuration option, please _do ask_ on the
-# mailing list or IRC channel [http://elasticsearch.org/community].
-#
-# Any element in the configuration can be replaced with environment variables
-# by placing them in ${...} notation. For example:
-#
-#node.rack: ${RACK_ENV_VAR}
-#
-# For information on supported formats and syntax for the config file, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
-################################### Cluster ###################################
-# Cluster name identifies your cluster for auto-discovery. If you're running
-# multiple clusters on the same network, make sure you're using unique names.
-#
-#cluster.name: elasticsearch
-#################################### Node #####################################
-# Node names are generated dynamically on startup, so you're relieved
-# from configuring them manually. You can tie this node to a specific name:
-#
-#node.name: "Franz Kafka"
-# Every node can be configured to allow or deny being eligible as the master,
-# and to allow or deny to store the data.
-#
-# Allow this node to be eligible as a master node (enabled by default):
-#
-#node.master: true
-#
-# Allow this node to store data (enabled by default):
-#
-#node.data: true
-# You can exploit these settings to design advanced cluster topologies.
-#
-# 1. You want this node to never become a master node, only to hold data.
-# This will be the "workhorse" of your cluster.
-#
-#node.master: false
-#node.data: true
-#
-# 2. You want this node to only serve as a master: to not store any data and
-# to have free resources. This will be the "coordinator" of your cluster.
-#
-#node.master: true
-#node.data: false
-#
-# 3. You want this node to be neither master nor data node, but
-# to act as a "search load balancer" (fetching data from nodes,
-# aggregating results, etc.)
-#
-#node.master: false
-#node.data: false
-# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
-# Node Info API [http://localhost:9200/_nodes] or GUI tools
-# such as <http://www.elasticsearch.org/overview/marvel/>,
-# <http://github.com/karmi/elasticsearch-paramedic>,
-# <http://github.com/lukas-vlcek/bigdesk> and
-# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
-# A node can have generic attributes associated with it, which can later be used
-# for customized shard allocation filtering, or allocation awareness. An attribute
-# is a simple key value pair, similar to node.key: value, here is an example:
-#
-#node.rack: rack314
-# By default, multiple nodes are allowed to start from the same installation location
-# to disable it, set the following:
-#node.max_local_storage_nodes: 1
-#################################### Index ####################################
-# You can set a number of options (such as shard/replica options, mapping
-# or analyzer definitions, translog settings, ...) for indices globally,
-# in this file.
-#
-# Note, that it makes more sense to configure index settings specifically for
-# a certain index, either when creating it or by using the index templates API.
-#
-# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
-# for more information.
-# Set the number of shards (splits) of an index (5 by default):
-#
-#index.number_of_shards: 5
-# Set the number of replicas (additional copies) of an index (1 by default):
-#
-#index.number_of_replicas: 1
-# Note, that for development on a local machine, with small indices, it usually
-# makes sense to "disable" the distributed features:
-#
-#index.number_of_shards: 1
-#index.number_of_replicas: 0
-# These settings directly affect the performance of index and search operations
-# in your cluster. Assuming you have enough machines to hold shards and
-# replicas, the rule of thumb is:
-#
-# 1. Having more *shards* enhances the _indexing_ performance and allows to
-# _distribute_ a big index across machines.
-# 2. Having more *replicas* enhances the _search_ performance and improves the
-# cluster _availability_.
-#
-# The "number_of_shards" is a one-time setting for an index.
-#
-# The "number_of_replicas" can be increased or decreased anytime,
-# by using the Index Update Settings API.
-#
-# Elasticsearch takes care about load balancing, relocating, gathering the
-# results from nodes, etc. Experiment with different settings to fine-tune
-# your setup.
-# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
-# the index status.
-#################################### Paths ####################################
-# Path to directory containing configuration (this file and logging.yml):
-#
-#path.conf: /path/to/conf
-# Path to directory where to store index data allocated for this node.
-#
-#path.data: /path/to/data
-#
-# Can optionally include more than one location, causing data to be striped across
-# the locations (a la RAID 0) on a file level, favouring locations with most free
-# space on creation. For example:
-#
-#path.data: /path/to/data1,/path/to/data2
-# Path to log files:
-#
-#path.logs: /path/to/logs
-# Path to where plugins are installed:
-#
-#path.plugins: /path/to/plugins
-#################################### Plugin ###################################
-# If a plugin listed here is not installed for current node, the node will not start.
-#
-#plugin.mandatory: mapper-attachments,lang-groovy
-################################### Memory ####################################
-# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
-# it _never_ swaps.
-#
-# Set this property to true to lock the memory:
-#
-#bootstrap.mlockall: true
-# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
-# to the same value, and that the machine has enough memory to allocate
-# for Elasticsearch, leaving enough memory for the operating system itself.
-#
-# You should also make sure that the Elasticsearch process is allowed to lock
-# the memory, eg. by using `ulimit -l unlimited`.
-############################## Network And HTTP ###############################
-# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
-# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
-# communication. (the range means that if the port is busy, it will automatically
-# try the next port).
-# Set the bind address specifically (IPv4 or IPv6):
-#
-#network.bind_host: 192.168.0.1
-# Set the address other nodes will use to communicate with this node. If not
-# set, it is automatically derived. It must point to an actual IP address.
-#
-#network.publish_host: 192.168.0.1
-# Set both 'bind_host' and 'publish_host':
-#
-#network.host: 192.168.0.1
-# Set a custom port for the node to node communication (9300 by default):
-#
-#transport.tcp.port: 9300
-# Enable compression for all communication between nodes (disabled by default):
-#
-#transport.tcp.compress: true
-# Set a custom port to listen for HTTP traffic:
-#
-#http.port: 9200
-# Set a custom allowed content length:
-#
-#http.max_content_length: 100mb
-# Disable HTTP completely:
-#
-#http.enabled: false
-################################### Gateway ###################################
-# The gateway allows for persisting the cluster state between full cluster
-# restarts. Every change to the state (such as adding an index) will be stored
-# in the gateway, and when the cluster starts up for the first time,
-# it will read its state from the gateway.
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
-# Settings below control how and when to start the initial recovery process on
-# a full cluster restart (to reuse as much local data as possible when using shared
-# gateway).
-# Allow recovery process after N nodes in a cluster are up:
-#
-#gateway.recover_after_nodes: 1
-# Set the timeout to initiate the recovery process, once the N nodes
-# from previous setting are up (accepts time value):
-#
-#gateway.recover_after_time: 5m
-# Set how many nodes are expected in this cluster. Once these N nodes
-# are up (and recover_after_nodes is met), begin recovery process immediately
-# (without waiting for recover_after_time to expire):
-#
-#gateway.expected_nodes: 2
-############################# Recovery Throttling #############################
-# These settings allow to control the process of shards allocation between
-# nodes during initial recovery, replica allocation, rebalancing,
-# or when adding and removing nodes.
-# Set the number of concurrent recoveries happening on a node:
-#
-# 1. During the initial recovery
-#
-#cluster.routing.allocation.node_initial_primaries_recoveries: 4
-#
-# 2. During adding/removing nodes, rebalancing, etc
-#
-#cluster.routing.allocation.node_concurrent_recoveries: 2
-# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
-#
-#indices.recovery.max_bytes_per_sec: 20mb
-# Set to limit the number of open concurrent streams when
-# recovering a shard from a peer:
-#
-#indices.recovery.concurrent_streams: 5
-################################## Discovery ##################################
-# Discovery infrastructure ensures nodes can be found within a cluster
-# and master node is elected. Multicast discovery is the default.
-# Set to ensure a node sees N other master eligible nodes to be considered
-# operational within the cluster. This should be set to a quorum/majority of
-# the master-eligible nodes in the cluster.
-#
-#discovery.zen.minimum_master_nodes: 1
-# Set the time to wait for ping responses from other nodes when discovering.
-# Set this option to a higher value on a slow or congested network
-# to minimize discovery failures:
-#
-#discovery.zen.ping.timeout: 3s
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
-# Unicast discovery allows to explicitly control which nodes will be used
-# to discover the cluster. It can be used when multicast is not present,
-# or to restrict the cluster communication-wise.
-#
-# 1. Disable multicast discovery (enabled by default):
-#
-#discovery.zen.ping.multicast.enabled: false
-#
-# 2. Configure an initial list of master nodes in the cluster
-# to perform discovery when new nodes (master or data) are started:
-#
-#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
-# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
-#
-# You have to install the cloud-aws plugin for enabling the EC2 discovery.
-#
-# For more information, see
-# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
-#
-# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
-# for a step-by-step tutorial.
-# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
-#
-# You have to install the cloud-gce plugin for enabling the GCE discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
-# Azure discovery allows to use Azure API in order to perform discovery.
-#
-# You have to install the cloud-azure plugin for enabling the Azure discovery.
-#
-# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
-################################## Slow Log ##################################
-# Shard level query and fetch threshold logging.
-#index.search.slowlog.threshold.query.warn: 10s
-#index.search.slowlog.threshold.query.info: 5s
-#index.search.slowlog.threshold.query.debug: 2s
-#index.search.slowlog.threshold.query.trace: 500ms
-#index.search.slowlog.threshold.fetch.warn: 1s
-#index.search.slowlog.threshold.fetch.info: 800ms
-#index.search.slowlog.threshold.fetch.debug: 500ms
-#index.search.slowlog.threshold.fetch.trace: 200ms
-#index.indexing.slowlog.threshold.index.warn: 10s
-#index.indexing.slowlog.threshold.index.info: 5s
-#index.indexing.slowlog.threshold.index.debug: 2s
-#index.indexing.slowlog.threshold.index.trace: 500ms
-################################## GC Logging ################################
-#monitor.jvm.gc.young.warn: 1000ms
-#monitor.jvm.gc.young.info: 700ms
-#monitor.jvm.gc.young.debug: 400ms
-#monitor.jvm.gc.old.warn: 10s
-#monitor.jvm.gc.old.info: 5s
-#monitor.jvm.gc.old.debug: 2s
+# ======================== Elasticsearch Configuration =========================
+#
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+#       Before you set out to tweak and tune the configuration, make sure you
+#       understand what you are trying to accomplish and the consequences.
+#
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production cluster.
+#
+# Please see the documentation for further information on configuration options:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
+#
+# ---------------------------------- Cluster -----------------------------------
+#
+# Use a descriptive name for your cluster:
+#
+# cluster.name: my-application
+#
+# ------------------------------------ Node ------------------------------------
+#
+# Use a descriptive name for the node:
+#
+# node.name: node-1
+#
+# Add custom attributes to the node:
+#
+# node.rack: r1
+#
+# ----------------------------------- Paths ------------------------------------
+#
+# Path to directory where to store the data (separate multiple locations by comma):
+#
+# path.data: /path/to/data
+#
+# Path to log files:
+#
+# path.logs: /path/to/logs
+#
+# ----------------------------------- Memory -----------------------------------
+#
+# Lock the memory on startup:
+#
+# bootstrap.mlockall: true
+#
+# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
+# available on the system and that the owner of the process is allowed to use this limit.
+#
+# Elasticsearch performs poorly when the system is swapping the memory.
+#
+# ---------------------------------- Network -----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+#
+# network.host: 192.168.0.1
+#
+# Set a custom port for HTTP:
+#
+# http.port: 9200
+#
+# For more information, see the documentation at:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
+#
+# ---------------------------------- Gateway -----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are started:
+#
+# gateway.recover_after_nodes: 3
+#
+# For more information, see the documentation at:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
+#
+# --------------------------------- Discovery ----------------------------------
+#
+# Elasticsearch nodes will find each other via multicast, by default.
+#
+# To use the unicast discovery, disable the multicast discovery:
+#
+# discovery.zen.ping.multicast.enabled: false
+#
+# Pass an initial list of hosts to perform discovery when new node is started:
+#
+# discovery.zen.ping.unicast.hosts: ["host1", "host2"]
+#
+# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
+#
+# discovery.zen.minimum_master_nodes: 3
+#
+# For more information, see the documentation at:
+# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
+#
+# ---------------------------------- Various -----------------------------------
+#
+# Disable starting multiple nodes on a single system:
+#
+# node.max_local_storage_nodes: 1
+#
+# Require explicit names when deleting indices:
+#
+# action.destructive_requires_name: true
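
Editor's note: the quorum formula in the new template (total number of master-eligible nodes / 2 + 1) is worth a worked example: for three master-eligible nodes it yields 3 / 2 + 1 = 2. A minimal sketch follows; the config path and host names are illustrative assumptions, not part of this commit.

[source,sh]
--------------------------------------------------
# Hypothetical 3-node cluster: quorum = (3 / 2) + 1 = 2 master-eligible nodes.
# Appends to the config file; the path and node host names are assumptions.
cat >> /etc/elasticsearch/elasticsearch.yml <<'EOF'
cluster.name: my-application
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["es-node-1", "es-node-2", "es-node-3"]
discovery.zen.minimum_master_nodes: 2
EOF
--------------------------------------------------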

@@ -39,9 +39,6 @@ org.apache.lucene.index.IndexReader#decRef()
 org.apache.lucene.index.IndexReader#incRef()
 org.apache.lucene.index.IndexReader#tryIncRef()
-@defaultMessage QueryWrapperFilter is cacheable by default - use Queries#wrap instead
-org.apache.lucene.search.QueryWrapperFilter#<init>(org.apache.lucene.search.Query)
 @defaultMessage Pass the precision step from the mappings explicitly instead
 org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
 org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)

@@ -50,13 +50,13 @@ See the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client]
 * https://github.com/ddnexus/flex[Flex]:
   Ruby Client.
 * https://github.com/printercu/elastics-rb[elastics]:
   Tiny client with built-in zero-downtime migrations and ActiveRecord integration.
 * https://github.com/toptal/chewy[chewy]:
   Chewy is ODM and wrapper for official elasticsearch client
 * https://github.com/ankane/searchkick[Searchkick]:
   Intelligent search made easy
@@ -82,7 +82,7 @@ See the {client}/php-api/current/index.html[official Elasticsearch PHP client].
 * https://github.com/searchbox-io/Jest[Jest]:
   Java Rest client.
-* There is of course the http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current/index.html[native ES Java client]
+* There is of course the {client}/java-api/current/index.html[native ES Java client]
 [[community-javascript]]
 === JavaScript

@@ -1,6 +1,6 @@
 = Community Supported Clients
-:client: http://www.elasticsearch.org/guide/en/elasticsearch/client
+:client: http://www.elastic.co/guide/en/elasticsearch/client
 include::clients.asciidoc[]

@@ -1,6 +1,6 @@
 = Groovy API
-:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
-:java: http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current
+:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current
+:java: http://www.elastic.co/guide/en/elasticsearch/client/java-api/current
 [preface]
 == Preface

@@ -1,6 +1,6 @@
 [[java-api]]
 = Java API
-:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
+:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current
 [preface]
 == Preface

@@ -234,7 +234,7 @@ QueryBuilder qb = matchAllQuery();
 [[mlt]]
-=== More Like This (Field) Query (mlt and mlt_field)
+=== More Like This Query (mlt)
 See:
 * {ref}/query-dsl-mlt-query.html[More Like This Query]
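
Editor's note: the `mlt_field` variant is gone; only the multi-field form remains. A rough REST-level sketch of the surviving query (the index and field names are hypothetical, not from this commit):

[source,sh]
--------------------------------------------------
# More Like This across two fields; min_term_freq relaxed for short texts.
curl -XGET 'localhost:9200/articles/_search' -d '{
  "query": {
    "more_like_this": {
      "fields": ["title", "body"],
      "like_text": "distributed search engine",
      "min_term_freq": 1,
      "max_query_terms": 12
    }
  }
}'
--------------------------------------------------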

@@ -1,138 +0,0 @@
-= elasticsearch-js
-== Overview
-Official low-level client for Elasticsearch. Its goal is to provide common
-ground for all Elasticsearch-related code in JavaScript; because of this it tries
-to be opinion-free and very extendable.
-The full documentation is available at http://elasticsearch.github.io/elasticsearch-js
-=== Getting the Node.js module
-To install the module into an existing Node.js project use npm:
-[source,sh]
-------------------------------------
-npm install elasticsearch
-------------------------------------
-=== Getting the browser client
-For a browser-based projects, builds for modern browsers are available http://elasticsearch.github.io/elasticsearch-js#browser-builds[here]. Download one of the archives and extract it, inside you'll find three files, pick the one that best matches your environment:
-* elasticsearch.jquery.js - for projects that already use jQuery
-* elasticsearch.angular.js - for Angular projects
-* elasticsearch.js - generic build for all other projects
-Each of the library specific builds tie into the AJAX and Promise creation facilities provided by their respective libraries. This is an example of how Elasticsearch.js can be extended to provide a more opinionated approach when appropriate.
-=== Setting up the client
-Now you are ready to get busy! First thing you'll need to do is create an instance of `elasticsearch.Client`. Here are several examples of configuration parameters you can use when creating that instance. For a full list of configuration options see http://elasticsearch.github.io/elasticsearch-js/index.html#configuration[the configuration docs].
-[source,javascript]
-------------------------------------
-var elasticsearch = require('elasticsearch');
-// Connect to localhost:9200 and use the default settings
-var client = new elasticsearch.Client();
-// Connect the client to two nodes, requests will be
-// load-balanced between them using round-robin
-var client = elasticsearch.Client({
-  hosts: [
-    'elasticsearch1:9200',
-    'elasticsearch2:9200'
-  ]
-});
-// Connect to the this host's cluster, sniff
-// for the rest of the cluster right away, and
-// again every 5 minutes
-var client = elasticsearch.Client({
-  host: 'elasticsearch1:9200',
-  sniffOnStart: true,
-  sniffInterval: 300000
-});
-// Connect to this host using https, basic auth,
-// a path prefix, and static query string values
-var client = new elasticsearch.Client({
-  host: 'https://user:password@elasticsearch1/search?app=blog'
-});
-------------------------------------
-=== Setting up the client in the browser
-The params accepted by the `Client` constructor are the same in the browser versions of the client, but how you access the Client constructor is different based on the build you are using. Below is an example of instantiating a client in each build.
-[source,javascript]
-------------------------------------
-// elasticsearch.js adds the elasticsearch namespace to the window
-var client = elasticsearch.Client({ ... });
-// elasticsearch.jquery.js adds the es namespace to the jQuery object
-var client = jQuery.es.Client({ ... });
-// elasticsearch.angular.js creates an elasticsearch
-// module, which provides an esFactory
-var app = angular.module('app', ['elasticsearch']);
-app.service('es', function (esFactory) {
-  return esFactory({ ... });
-});
-------------------------------------
-=== Using the client instance to make API calls.
-Once you create the client, making API calls is simple.
-[source,javascript]
-------------------------------------
-// get the current status of the entire cluster.
-// Note: params are always optional, you can just send a callback
-client.cluster.health(function (err, resp) {
-  if (err) {
-    console.error(err.message);
-  } else {
-    console.dir(resp);
-  }
-});
-// index a document
-client.index({
-  index: 'blog',
-  type: 'post',
-  id: 1,
-  body: {
-    title: 'JavaScript Everywhere!',
-    content: 'It all started when...',
-    date: '2013-12-17'
-  }
-}, function (err, resp) {
-  // ...
-});
-// search for documents (and also promises!!)
-client.search({
-  index: 'users',
-  size: 50,
-  body: {
-    query: {
-      match: {
-        profile: 'elasticsearch'
-      }
-    }
-  }
-}).then(function (resp) {
-  var hits = resp.body.hits;
-});
-------------------------------------
-== Copyright and License
-This software is Copyright (c) 2013-2015 by Elasticsearch BV.
-This is free software, licensed under The Apache License Version 2.0.

@@ -153,9 +153,6 @@ due to forced awareness or allocation filtering.
 `indices.cache.filter.size`::
 See <<index-modules-cache>>
-`indices.cache.filter.expire` (time)::
-See <<index-modules-cache>>
 [float]
 ==== TTL interval

@@ -89,7 +89,7 @@ The number of shards and replicas can be defined per index at the time the index
 By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index.
 NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents.
 You can monitor shard sizes using the <<cat-shards,`_cat/shards`>> api.
 With that out of the way, let's get started with the fun part...
@@ -104,13 +104,13 @@ java -version
 echo $JAVA_HOME
 --------------------------------------------------
-Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elasticsearch.org/download[`www.elasticsearch.org/download`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file.
+Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file.
 Let's download the Elasticsearch {version} tar as follows (Windows users should download the zip package):
 ["source","sh",subs="attributes,callouts"]
 --------------------------------------------------
-curl -L -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz
+curl -L -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz
 --------------------------------------------------
 Then extract it as follows (Windows users should unzip the zip package):
@@ -868,7 +868,7 @@ In the previous section, we skipped over a little detail called the document sco
 All queries in Elasticsearch trigger computation of the relevance scores. In cases where we do not need the relevance scores, Elasticsearch provides another query capability in the form of <<query-dsl-filters,filters>>. Filters are similar in concept to queries except that they are optimized for much faster execution speeds for two primary reasons:
 * Filters do not score so they are faster to execute than queries
-* Filters can be http://www.elasticsearch.org/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries
+* Filters can be http://www.elastic.co/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries
 To understand filters, let's first introduce the <<query-dsl-filtered-query,`filtered` query>>, which allows you to combine a query (like `match_all`, `match`, `bool`, etc.) together with a filter. As an example, let's introduce the <<query-dsl-range-filter,`range` filter>>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering.
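
Editor's note: the filtered-query-plus-range-filter combination described above, sketched against the tutorial's bank example (the index and field names follow that example; the values are illustrative):

[source,sh]
--------------------------------------------------
# match_all query combined with a range filter: the range clause does not
# score, and its matching document set can be reused across searches.
curl -XGET 'localhost:9200/bank/_search' -d '{
  "query": {
    "filtered": {
      "query": { "match_all": {} },
      "filter": {
        "range": { "balance": { "gte": 20000, "lte": 30000 } }
      }
    }
  }
}'
--------------------------------------------------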

@@ -32,7 +32,7 @@ mapping specified in the <<indices-create-index,`create-index`>> or
 `_default_` mapping.
 The default mapping definition is a plain mapping definition that is
-embedded within ElasticSearch:
+embedded within Elasticsearch:
 [source,js]
 --------------------------------------------------
@@ -46,11 +46,8 @@ Pretty short, isn't it? Basically, everything is `_default_`ed, including the
 dynamic nature of the root object mapping which allows new fields to be added
 automatically.
-The built-in default mapping definition can be overridden in several ways. A
-`_default_` mapping can be specified when creating a new index, or the global
-`_default_` mapping (for all indices) can be configured by creating a file
-called `config/default-mapping.json`. (This location can be changed with
-the `index.mapper.default_mapping_location` setting.)
+The default mapping can be overridden by specifying the `_default_` type when
+creating a new index.
 Dynamic creation of mappings for unmapped types can be completely
 disabled by setting `index.mapper.dynamic` to `false`.
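
Editor's note: a minimal sketch of the surviving mechanism, specifying `_default_` at index creation (the index name and the disabled `_all` field are assumptions for illustration):

[source,sh]
--------------------------------------------------
# The _default_ type is applied to any type created in this index
# that has no explicit mapping of its own.
curl -XPUT 'localhost:9200/my_index' -d '{
  "mappings": {
    "_default_": {
      "_all": { "enabled": false }
    }
  }
}'
--------------------------------------------------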

@@ -71,8 +71,6 @@ include::mapping/date-format.asciidoc[]
 include::mapping/dynamic-mapping.asciidoc[]
-include::mapping/conf-mappings.asciidoc[]
 include::mapping/meta.asciidoc[]
 include::mapping/transform.asciidoc[]

@@ -1,19 +0,0 @@
-[[mapping-conf-mappings]]
-== Config Mappings
-Creating new mappings can be done using the
-<<indices-put-mapping,Put Mapping>>
-API. When a document is indexed with no mapping associated with it in
-the specific index, the
-<<mapping-dynamic-mapping,dynamic / default
-mapping>> feature will kick in and automatically create mapping
-definition for it.
-Mappings can also be provided on the node level, meaning that each index
-created will automatically be started with all the mappings defined
-within a certain location.
-Mappings can be defined within files called `[mapping_name].json` and be
-placed either under `config/mappings/_default` location, or under
-`config/mappings/[index_name]` (for mappings that should be associated
-only with a specific index).

@@ -21,12 +21,8 @@ embedded within the distribution:
 --------------------------------------------------
 Pretty short, isn't it? Basically, everything is defaulted, especially the
-dynamic nature of the root object mapping. The default mapping
-definition can be overridden in several manners. The simplest manner is
-to simply define a file called `default-mapping.json` and to place it
-under the `config` directory (which can be configured to exist in a
-different location). It can also be explicitly set using the
-`index.mapper.default_mapping_location` setting.
+dynamic nature of the root object mapping. The default mapping can be
+overridden by specifying the `_default_` type when creating a new index.
 The dynamic creation of mappings for unmapped types can be completely
 disabled by setting `index.mapper.dynamic` to `false`.

@@ -16,27 +16,6 @@ specifying the `tweet` type in the document itself:
 }
 --------------------------------------------------
-[float]
-==== Index / Search Analyzers
-The root object allows to define type mapping level analyzers for index
-and search that will be used with all different fields that do not
-explicitly set analyzers on their own. Here is an example:
-[source,js]
---------------------------------------------------
-{
-    "tweet" : {
-        "analyzer" : "standard",
-        "search_analyzer" : "standard_with_synonyms"
-    }
-}
---------------------------------------------------
-The above simply explicitly defines both the `analyzer` and
-`search_analyzer` that will be used. If `search_analyzer` is not specified,
-it defaults to the value of `analyzer`.
 [float]
 ==== dynamic_date_formats

@@ -362,7 +362,7 @@ in the query string.
 === Percolator
 The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator,
-but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elasticsearch.org/blog/percolator-redesign-blog-post/[redesigned percolator]
+but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator]
 blog post for the reasons why the percolator has been redesigned.
 Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries

@@ -312,6 +312,14 @@ They are always stored with doc values, and not indexed.
 The `_source` field no longer supports `includes` and `excludes` paramters. When
 `_source` is enabled, the entire original source will be stored.
+==== Config based mappings
+The ability to specify mappings in configuration files has been removed. To specify
+default mappings that apply to multiple indexes, use index templates.
+The following settings are no longer valid:
+* `index.mapper.default_mapping_location`
+* `index.mapper.default_percolator_mapping_location`
 === Codecs
 It is no longer possible to specify per-field postings and doc values formats
@@ -410,6 +418,12 @@ favour or `bool`.
 The `execution` option of the `terms` filter is now deprecated and ignored if
 provided.
+The `_cache` and `_cache_key` parameters of filters are deprecated in the REST
+layer and removed in the Java API. In case they are specified they will be
+ignored. Instead filters are always used as their own cache key and elasticsearch
+makes decisions by itself about whether it should cache filters based on how
+often they are used.
 === Snapshot and Restore
 The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer
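
Editor's note: for the config-based-mappings removal above, an index template is the suggested replacement. A hedged sketch (the template name, pattern, and mapping body are assumptions):

[source,sh]
--------------------------------------------------
# Applies a _default_ mapping to every index created after the template
# exists, replacing the old config/default-mapping.json mechanism.
curl -XPUT 'localhost:9200/_template/default_mappings' -d '{
  "template": "*",
  "mappings": {
    "_default_": {
      "_all": { "enabled": false }
    }
  }
}'
--------------------------------------------------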

@@ -26,7 +26,7 @@ plugin --install <org>/<user/component>/<version>
 -----------------------------------
 The plugins will be
-automatically downloaded in this case from `download.elasticsearch.org`,
+automatically downloaded in this case from `download.elastic.co`,
 and in case they don't exist there, from maven (central and sonatype).
 Note that when the plugin is located in maven central or sonatype

@@ -389,9 +389,23 @@ for details on what operators and functions are available.
 Variables in `expression` scripts are available to access:
 * Single valued document fields, e.g. `doc['myfield'].value`
+* Single valued document fields can also be accessed without `.value` e.g. `doc['myfield']`
 * Parameters passed into the script, e.g. `mymodifier`
 * The current document's score, `_score` (only available when used in a `script_score`)
+Variables in `expression` scripts that are of type `date` may use the following member methods:
+* getYear()
+* getMonth()
+* getDayOfMonth()
+* getHourOfDay()
+* getMinutes()
+* getSeconds()
+The following example shows the difference in years between the `date` fields date0 and date1:
+`doc['date1'].getYear() - doc['date0'].getYear()`
 There are a few limitations relative to other script languages:
 * Only numeric fields may be accessed
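
Editor's note: the new date methods in context — a sketch of an `expression` script inside `function_score`, reusing the doc's date0/date1 fields (the index name and the surrounding query are assumptions):

[source,sh]
--------------------------------------------------
# Scores each document by the year difference between two date fields.
curl -XGET 'localhost:9200/my_index/_search' -d @- <<'EOF'
{
  "query": {
    "function_score": {
      "query": { "match_all": {} },
      "script_score": {
        "lang": "expression",
        "script": "doc['date1'].getYear() - doc['date0'].getYear()"
      }
    }
  }
}
EOF
--------------------------------------------------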

@@ -10,85 +10,14 @@ As a general rule, filters should be used instead of queries:
 [[caching]]
 === Filters and Caching
-Filters can be a great candidate for caching. Caching the result of a
-filter does not require a lot of memory, and will cause other queries
-executing against the same filter (same parameters) to be blazingly
-fast.
+Filters can be a great candidate for caching. Caching the document set that
+a filter matches does not require much memory and can help improve
+execution speed of queries.
-However the cost of caching is not the same for all filters. For
-instance some filters are already fast out of the box while caching could
-add significant overhead, and some filters produce results that are already
-cacheable so caching them is just a matter of putting the result in the
-cache.
-The default caching policy, `_cache: auto`, tracks the 1000 most recently
-used filters on a per-index basis and makes decisions based on their
-frequency.
-[float]
-==== Filters that read directly the index structure
-Some filters can directly read the index structure and potentially jump
-over large sequences of documents that are not worth evaluating (for
-instance when these documents do not match the query). Caching these
-filters introduces overhead given that all documents that the filter
-matches need to be consumed in order to be loaded into the cache.
-These filters, which include the <<query-dsl-term-filter,term>> and
-<<query-dsl-term-query,query>> filters, are only cached after they
-appear 5 times or more in the history of the 1000 most recently used
-filters.
-[float]
-==== Filters that produce results that are already cacheable
-Some filters produce results that are already cacheable, and the difference
-between caching and not caching them is the act of placing the result in
-the cache or not. These filters, which include the
-<<query-dsl-terms-filter,terms>>,
-<<query-dsl-prefix-filter,prefix>>, and
-<<query-dsl-range-filter,range>> filters, are by default cached after they
-appear twice or more in the history of the most 1000 recently used filters.
-[float]
-==== Computational filters
-Some filters need to run some computation in order to figure out whether
-a given document matches a filter. These filters, which include the geo and
-<<query-dsl-script-filter,script>> filters, but also the
-<<query-dsl-terms-filter,terms>> and <<query-dsl-range-filter,range>>
-filters when using the `fielddata` execution mode are never cached by default,
-as it would require to evaluate the filter on all documents in your indices
-while they can otherwise be only evaluated on documents that match the query.
-[float]
-==== Compound filters
-The last type of filters are those working with other filters, and includes
-the <<query-dsl-bool-filter,bool>>,
-<<query-dsl-and-filter,and>>,
-<<query-dsl-not-filter,not>> and
-<<query-dsl-or-filter,or>> filters.
-There is no general rule about these filters. Depending on the filters that
-they wrap, they will sometimes return a filter that dynamically evaluates the
-sub filters and sometimes evaluate the sub filters eagerly in order to return
-a result that is already cacheable, so depending on the case, these filters
-will be cached after they appear 2+ or 5+ times in the history of the most
-1000 recently used filters.
-[float]
-==== Overriding the default behaviour
-All filters allow to set `_cache` element on them to explicitly control
-caching. It accepts 3 values: `true` in order to cache the filter, `false`
-to make sure that the filter will not be cached, and `auto`, which is the
-default and will decide on whether to cache the filter based on the cost
-to cache it and how often it has been used as explained above.
-Filters also allow to set `_cache_key` which will be used as the
-caching key for that filter. This can be handy when using very large
-filters (like a terms filter with many elements in it).
+Elasticsearch decides to cache filters based on how often they are used. For
+this reason you might occasionally see better performance by splitting
+complex filters into a static part that Elasticsearch will cache and a dynamic
+part which is less costly than the original filter.
 include::filters/and-filter.asciidoc[]
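
Editor's note: the static/dynamic split that the new text recommends, sketched with a `now`-based range (the field and index names are assumptions): the day-rounded clause stays identical across requests and so is a good caching candidate, while the exact clause only has to trim its result.

[source,sh]
--------------------------------------------------
# Static part: rounded to the day, identical across many requests -> cacheable.
# Dynamic part: exact now-1h, cheap because it only narrows the cached set.
curl -XGET 'localhost:9200/logs/_search' -d @- <<'EOF'
{
  "query": {
    "filtered": {
      "filter": {
        "bool": {
          "must": [
            { "range": { "timestamp": { "gt": "now-1h/d" } } },
            { "range": { "timestamp": { "gt": "now-1h" } } }
          ]
        }
      }
    }
  }
}
EOF
--------------------------------------------------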

@@ -32,40 +32,3 @@ filters. Can be placed within queries that accept a filter.
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is only cached by default if there is evidence of
-reuse. It is possible to opt-in explicitely for caching by setting `_cache`
-to `true`. Since the `_cache` element requires to be set on the `and` filter
-itself, the structure then changes a bit to have the filters provided within a
-`filters` element:
-[source,js]
---------------------------------------------------
-{
-    "filtered" : {
-        "query" : {
-            "term" : { "name.first" : "shay" }
-        },
-        "filter" : {
-            "and" : {
-                "filters": [
-                    {
-                        "range" : {
-                            "postDate" : {
-                                "from" : "2010-03-01",
-                                "to" : "2010-04-01"
-                            }
-                        }
-                    },
-                    {
-                        "prefix" : { "name.second" : "ba" }
-                    }
-                ],
-                "_cache" : true
-            }
-        }
-    }
-}
---------------------------------------------------

@@ -230,11 +230,3 @@ are not supported. Here is an example:
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is not cached by default. The `_cache` can be
-set to `true` to cache the *result* of the filter. This is handy when
-the same bounding box parameters are used on several (many) other
-queries. Note, the process of caching the first execution is higher when
-caching (since it needs to satisfy different queries).

@@ -172,11 +172,3 @@ The `geo_distance` filter can work with multiple locations / points per
 document. Once a single location / point matches the filter, the
 document will be included in the filter.
-[float]
-==== Caching
-The result of the filter is not cached by default. The `_cache` can be
-set to `true` to cache the *result* of the filter. This is handy when
-the same point and distance parameters are used on several (many) other
-queries. Note, the process of caching the first execution is higher when
-caching (since it needs to satisfy different queries).

@@ -116,11 +116,3 @@ The filter *requires* the
 <<mapping-geo-point-type,geo_point>> type to be
 set on the relevant field.
-[float]
-==== Caching
-The result of the filter is not cached by default. The `_cache` can be
-set to `true` to cache the *result* of the filter. This is handy when
-the same points parameters are used on several (many) other queries.
-Note, the process of caching the first execution is higher when caching
-(since it needs to satisfy different queries).

@@ -110,12 +110,3 @@ shape:
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the Filter is not cached by default. Setting `_cache` to
-`true` will mean the results of the Filter will be cached. Since shapes
-can contain 10s-100s of coordinates and any one differing means a new
-shape, it may make sense to only using caching when you are sure that
-the shapes will remain reasonably static.

@@ -61,10 +61,3 @@ next to the given cell.
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is not cached by default. The
-`_cache` parameter can be set to `true` to turn caching on.
-By default the filter uses the resulting geohash cells as a cache key.
-This can be changed by using the `_cache_key` option.

@@ -88,9 +88,3 @@ APIS, eg:
 curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human"
 --------------------------------------------------
-[float]
-==== Caching
-The `has_child` filter cannot be cached in the filter cache. The `_cache`
-and `_cache_key` options are a no-op in this filter. Also any filter that
-wraps the `has_child` filter either directly or indirectly will not be cached.

@@ -63,9 +63,3 @@ APIS, eg:
 curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human"
 --------------------------------------------------
-[float]
-==== Caching
-The `has_parent` filter cannot be cached in the filter cache. The `_cache`
-and `_cache_key` options are a no-op in this filter. Also any filter that
-wraps the `has_parent` filter either directly or indirectly will not be cached.

@@ -2,10 +2,7 @@
 === Nested Filter
 A `nested` filter works in a similar fashion to the
-<<query-dsl-nested-query,nested>> query, except it's
-used as a filter. It follows exactly the same structure, but also allows
-to cache the results (set `_cache` to `true`), and have it named (set
-the `_name` value). For example:
+<<query-dsl-nested-query,nested>> query. For example:
 [source,js]
 --------------------------------------------------
@@ -26,8 +23,7 @@ the `_name` value). For example:
             }
         ]
     }
-},
-"_cache" : true
+}
 }
 }
 }

@@ -50,33 +50,3 @@ Or, in a longer form with a `filter` element:
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is only cached if there is evidence of reuse.
-The `_cache` can be set to `true` in order to cache it (though usually
-not needed). Here is an example:
-[source,js]
---------------------------------------------------
-{
-    "filtered" : {
-        "query" : {
-            "term" : { "name.first" : "shay" }
-        },
-        "filter" : {
-            "not" : {
-                "filter" : {
-                    "range" : {
-                        "postDate" : {
-                            "from" : "2010-03-01",
-                            "to" : "2010-04-01"
-                        }
-                    }
-                },
-                "_cache" : true
-            }
-        }
-    }
-}
---------------------------------------------------

@@ -27,36 +27,3 @@ filters. Can be placed within queries that accept a filter.
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is only cached by default if there is evidence
-of reuse. The `_cache` can be
-set to `true` in order to cache it (though usually not needed). Since
-the `_cache` element requires to be set on the `or` filter itself, the
-structure then changes a bit to have the filters provided within a
-`filters` element:
-[source,js]
---------------------------------------------------
-{
-    "filtered" : {
-        "query" : {
-            "term" : { "name.first" : "shay" }
-        },
-        "filter" : {
-            "or" : {
-                "filters" : [
-                    {
-                        "term" : { "name.second" : "banon" }
-                    },
-                    {
-                        "term" : { "name.nick" : "kimchy" }
-                    }
-                ],
-                "_cache" : true
-            }
-        }
-    }
-}
---------------------------------------------------

@@ -16,22 +16,3 @@ a filter. Can be placed within queries that accept a filter.
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is cached by default if there is evidence of reuse.
-The `_cache` can be set to `true` in order to cache it. Here is an example:
-[source,js]
---------------------------------------------------
-{
-    "constant_score" : {
-        "filter" : {
-            "prefix" : {
-                "user" : "ki",
-                "_cache" : true
-            }
-        }
-    }
-}
---------------------------------------------------

@@ -19,34 +19,3 @@ that accept a filter.
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is only cached by default if there is evidence of reuse.
-The `_cache` can be
-set to `true` to cache the *result* of the filter. This is handy when
-the same query is used on several (many) other queries. Note, the
-process of caching the first execution is higher when not caching (since
-it needs to satisfy different queries).
-Setting the `_cache` element requires a different format for the
-`query`:
-[source,js]
---------------------------------------------------
-{
-    "constantScore" : {
-        "filter" : {
-            "fquery" : {
-                "query" : {
-                    "query_string" : {
-                        "query" : "this AND that OR thus"
-                    }
-                },
-                "_cache" : true
-            }
-        }
-    }
-}
---------------------------------------------------

@@ -95,11 +95,3 @@ requires more memory, so make sure you have sufficient memory on your nodes in
 order to use this execution mode. It usually makes sense to use it on fields
 you're already aggregating or sorting by.
-[float]
-==== Caching
-The result of the filter is only cached by default if there is evidence of reuse. The
-`_cache` can be set to `false` to turn it off.
-Having the `now` expression used without rounding will make the filter unlikely to be
-cached since reuse is very unlikely.

@@ -51,9 +51,7 @@ You have to enable caching explicitly in order to have the
                 "flags" : "INTERSECTION|COMPLEMENT|EMPTY",
                 "max_determinized_states": 20000
             },
-            "_name":"test",
-            "_cache" : true,
-            "_cache_key" : "key"
+            "_name":"test"
         }
     }
 }

@@ -43,11 +43,3 @@ to use the ability to pass parameters to the script itself, for example:
 }
 ----------------------------------------------
-[float]
-==== Caching
-The result of the filter is not cached by default. The `_cache` can be
-set to `true` to cache the *result* of the filter. This is handy when
-the same script and parameters are used on several (many) other queries.
-Note, the process of caching the first execution is higher when caching
-(since it needs to satisfy different queries).

@@ -17,22 +17,3 @@ accept a filter, for example:
 }
 --------------------------------------------------
-[float]
-==== Caching
-The result of the filter is only cached by default if there is evidence of reuse.
-The `_cache` can be set to `false` to turn it off. Here is an example:
-[source,js]
---------------------------------------------------
-{
-    "constant_score" : {
-        "filter" : {
-            "term" : {
-                "user" : "kimchy",
-                "_cache" : false
-            }
-        }
-    }
-}
---------------------------------------------------

@@ -18,13 +18,6 @@ Filters documents that have fields that match any of the provided terms
 The `terms` filter is also aliased with `in` as the filter name for
 simpler usage.
-[float]
-==== Caching
-The result of the filter is cached if there is evidence of reuse. It is
-possible to enable caching explicitely by setting `_cache` to `true` and
-to disable caching by setting `_cache` to `false`.
 [float]
 ==== Terms lookup mechanism
@@ -93,8 +86,7 @@ curl -XGET localhost:9200/tweets/_search -d '{
                 "type" : "user",
                 "id" : "2",
                 "path" : "followers"
-            },
-            "_cache_key" : "user_2_friends"
+            }
         }
     }
 }
@@ -102,10 +94,6 @@ curl -XGET localhost:9200/tweets/_search -d '{
 }'
 --------------------------------------------------
-If there are lots of matching values, then `_cache_key` is recommended to be set,
-so that the filter cache will not store a reference to the potentially heavy
-terms filter.
 The structure of the external terms document can also include array of
 inner objects, for example:

@@ -52,6 +52,8 @@ include::queries/range-query.asciidoc[]
 include::queries/regexp-query.asciidoc[]
+include::queries/span-containing-query.asciidoc[]
 include::queries/span-first-query.asciidoc[]
 include::queries/span-multi-term-query.asciidoc[]
@@ -64,6 +66,8 @@ include::queries/span-or-query.asciidoc[]
 include::queries/span-term-query.asciidoc[]
+include::queries/span-within-query.asciidoc[]
 include::queries/term-query.asciidoc[]
 include::queries/terms-query.asciidoc[]

@@ -0,0 +1,29 @@
+[[query-dsl-span-containing-query]]
+=== Span Containing Query
+Returns matches which enclose another span query. The span containing
+query maps to Lucene `SpanContainingQuery`. Here is an example:
+[source,js]
+--------------------------------------------------
+{
+    "span_containing" : {
+        "little" : {
+            "span_term" : { "field1" : "foo" }
+        },
+        "big" : {
+            "span_near" : {
+                "clauses" : [
+                    { "span_term" : { "field1" : "bar" } },
+                    { "span_term" : { "field1" : "baz" } }
+                ],
+                "slop" : 5,
+                "in_order" : true
+            }
+        }
+    }
+}
+--------------------------------------------------
+The `big` and `little` clauses can be any span type query. Matching
+spans from `big` that contain matches from `little` are returned.

View File

@ -0,0 +1,29 @@
[[query-dsl-span-within-query]]
=== Span Within Query
Returns matches which are enclosed inside another span query. The span within
query maps to Lucene `SpanWithinQuery`. Here is an example:
[source,js]
--------------------------------------------------
{
"span_within" : {
"little" : {
"span_term" : { "field1" : "foo" }
},
"big" : {
"span_near" : {
"clauses" : [
{ "span_term" : { "field1" : "bar" } },
{ "span_term" : { "field1" : "baz" } }
],
"slop" : 5,
"in_order" : true
}
}
}
}
--------------------------------------------------
The `big` and `little` clauses can be any span type query. Matching
spans from `little` that are enclosed within `big` are returned.
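At the Lucene level this is the mirror image of `span_containing`: reusing the `big` and `little` spans from the sketch above, only the wrapping query changes (again a sketch, assuming the Lucene 5.x span query API):

[source,java]
--------------------------------------------------
// returns matching spans from "little" that are enclosed within "big"
SpanWithinQuery query = new SpanWithinQuery(big, little);
--------------------------------------------------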

View File

@ -4,7 +4,7 @@
[partintro]
--
This section includes information on how to setup *elasticsearch* and
-get it running. If you haven't already, http://www.elasticsearch.org/download[download] it, and
+get it running. If you haven't already, http://www.elastic.co/downloads[download] it, and
then check the <<setup-installation,installation>> docs.
NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`.

View File

@ -22,14 +22,14 @@ Download and install the Public Signing Key:
[source,sh]
--------------------------------------------------
-wget -qO - https://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -
+wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
--------------------------------------------------
Add the repository definition to your `/etc/apt/sources.list` file:
["source","sh",subs="attributes,callouts"]
--------------------------------------------------
-echo "deb http://packages.elasticsearch.org/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list
+echo "deb http://packages.elastic.co/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list
--------------------------------------------------
[WARNING]
@ -65,7 +65,7 @@ Download and install the public signing key:
[source,sh]
--------------------------------------------------
-rpm --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch
+rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
--------------------------------------------------
Add the following in your `/etc/yum.repos.d/` directory
@ -75,9 +75,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo`
--------------------------------------------------
[elasticsearch-{branch}]
name=Elasticsearch repository for {branch}.x packages
-baseurl=http://packages.elasticsearch.org/elasticsearch/{branch}/centos
+baseurl=http://packages.elastic.co/elasticsearch/{branch}/centos
gpgcheck=1
-gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
+gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
--------------------------------------------------

View File

@ -69,7 +69,7 @@ $ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
[float]
==== 1.0 and later
-To back up a running 1.0 or later system, it is simplest to use the snapshot feature. Complete instructions for backup and restore with snapshots are available http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-snapshots.html[here].
+To back up a running 1.0 or later system, it is simplest to use the snapshot feature. See the complete instructions for <<modules-snapshots,backup and restore with snapshots>>.
[float]
[[rolling-upgrades]]
@ -96,7 +96,7 @@ This syntax applies to Elasticsearch 1.0 and later:
* Confirm that all shards are correctly reallocated to the remaining running nodes.
-* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org:
+* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elastic.co:
** Extract the zip or tarball to a new directory, usually in the same volume as the current Elasticsearch installation. Do not overwrite the existing installation, as the downloaded archive will contain a default elasticsearch.yml file and will overwrite your existing configuration.
** Copy the configuration files from the old Elasticsearch installation's config directory to the new Elasticsearch installation's config directory. Move data files from the old Elasticsearch installation's data directory if necessary. If data files are not located within the tarball's extraction directory, they will not have to be moved.
** The simplest solution for moving from one version to another is to have a symbolic link for 'elasticsearch' that points to the currently running version. This link can be easily updated and will provide a stable access point to the most recent version. Update this symbolic link if it is being used.

View File

@ -22,10 +22,10 @@ improvements throughout this page to provide the full context.
If you're interested in more on how we approach ensuring resiliency in
Elasticsearch, you may be interested in Igor Motov's recent talk
-http://www.elasticsearch.org/videos/improving-elasticsearch-resiliency/[Improving Elasticsearch Resiliency].
+http://www.elastic.co/videos/improving-elasticsearch-resiliency[Improving Elasticsearch Resiliency].
You may also be interested in our blog post
-http://www.elasticsearch.org/blog/resiliency-elasticsearch/[Resiliency in Elasticsearch],
+http://www.elastic.co/blog/resiliency-elasticsearch[Resiliency in Elasticsearch],
which details our thought processes when addressing resiliency in both
Elasticsearch and the work our developers do upstream in Apache Lucene.
@ -416,7 +416,7 @@ The Snapshot/Restore API supports a number of different repository types for sto
[float]
=== Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0)
-Currently, the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.
+Currently, the https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.
[float]
=== Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0)

View File

@ -32,7 +32,7 @@
    <properties>
        <lucene.version>5.2.0</lucene.version>
-       <lucene.snapshot.revision>1675927</lucene.snapshot.revision>
+       <lucene.snapshot.revision>1677039</lucene.snapshot.revision>
        <lucene.maven.version>5.2.0-snapshot-${lucene.snapshot.revision}</lucene.maven.version>
        <testframework.version>2.1.14</testframework.version>
        <tests.jvms>auto</tests.jvms>
@ -267,6 +267,12 @@
        <artifactId>jackson-dataformat-yaml</artifactId>
        <version>2.5.1</version>
        <scope>compile</scope>
+       <exclusions>
+           <exclusion>
+               <groupId>com.fasterxml.jackson.core</groupId>
+               <artifactId>jackson-databind</artifactId>
+           </exclusion>
+       </exclusions>
    </dependency>
    <dependency>

View File

@ -32,10 +32,6 @@
      "type" : "boolean",
      "description" : "Clear filter caches"
    },
-   "filter_keys": {
-     "type" : "boolean",
-     "description" : "A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all)"
-   },
    "id": {
      "type" : "boolean",
      "description" : "Clear ID caches for parent/child"

View File

@ -18,9 +18,9 @@
package org.apache.lucene.search.postingshighlight;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
@ -91,8 +91,7 @@ public final class CustomPostingsHighlighter extends XPostingsHighlighter {
    /*
    Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object
    */
-   public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
-       IndexReader reader = searcher.getIndexReader();
+   public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexReader reader, int docId, int maxPassages) throws IOException {
        IndexReaderContext readerContext = reader.getContext();
        List<LeafReaderContext> leaves = readerContext.leaves();

View File

@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.stats;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

View File

@ -37,7 +37,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
    private boolean recycler = false;
    private boolean queryCache = false;
    private String[] fields = null;
-   private String[] filterKeys = null;
    ClearIndicesCacheRequest() {
@ -83,15 +82,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
        return this.fields;
    }
-   public ClearIndicesCacheRequest filterKeys(String... filterKeys) {
-       this.filterKeys = filterKeys;
-       return this;
-   }
-   public String[] filterKeys() {
-       return this.filterKeys;
-   }
    public boolean idCache() {
        return this.idCache;
    }
@ -118,7 +108,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
        idCache = in.readBoolean();
        recycler = in.readBoolean();
        fields = in.readStringArray();
-       filterKeys = in.readStringArray();
        queryCache = in.readBoolean();
    }
@ -130,7 +119,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
        out.writeBoolean(idCache);
        out.writeBoolean(recycler);
        out.writeStringArrayNullable(fields);
-       out.writeStringArrayNullable(filterKeys);
        out.writeBoolean(queryCache);
    }
}

View File

@ -53,11 +53,6 @@ public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBu
        return this;
    }
-   public ClearIndicesCacheRequestBuilder setFilterKeys(String... filterKeys) {
-       request.filterKeys(filterKeys);
-       return this;
-   }
    public ClearIndicesCacheRequestBuilder setIdCache(boolean idCache) {
        request.idCache(idCache);
        return this;

View File

@ -39,7 +39,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
    private boolean queryCache = false;
    private String[] fields = null;
-   private String[] filterKeys = null;
    ShardClearIndicesCacheRequest() {
    }
@ -50,7 +49,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
        fieldDataCache = request.fieldDataCache();
        idCache = request.idCache();
        fields = request.fields();
-       filterKeys = request.filterKeys();
        recycler = request.recycler();
        queryCache = request.queryCache();
    }
@ -79,10 +77,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
        return this.fields;
    }
-   public String[] filterKeys() {
-       return this.filterKeys;
-   }
    public ShardClearIndicesCacheRequest waitForOperations(boolean waitForOperations) {
        this.filterCache = waitForOperations;
        return this;
@ -96,7 +90,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
        idCache = in.readBoolean();
        recycler = in.readBoolean();
        fields = in.readStringArray();
-       filterKeys = in.readStringArray();
        queryCache = in.readBoolean();
    }
@ -108,7 +101,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
        out.writeBoolean(idCache);
        out.writeBoolean(recycler);
        out.writeStringArrayNullable(fields);
-       out.writeStringArrayNullable(filterKeys);
        out.writeBoolean(queryCache);
    }
}

View File

@ -106,10 +106,6 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
            clearedAtLeastOne = true;
            service.cache().filter().clear("api");
        }
-       if (request.filterKeys() != null && request.filterKeys().length > 0) {
-           clearedAtLeastOne = true;
-           service.cache().filter().clear("api", request.filterKeys());
-       }
        if (request.fieldDataCache()) {
            clearedAtLeastOne = true;
            if (request.fields() == null || request.fields().length == 0) {

View File

@ -1,245 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation.allocator;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
/**
* A {@link ShardsAllocator} that tries to balance shards across nodes in the
* cluster such that each node holds approximately the same number of shards. The
* allocation algorithm operates on the cluster as a whole, i.e. it is index-agnostic. While the
* number of shards per node might be balanced across the cluster, a single node
* can hold multiple shards of a single index, so the shards of an index
* are not necessarily balanced across nodes. Yet, due to high-level
* {@link AllocationDecider decisions} multiple instances of the same shard
* won't be allocated on the same node.
* <p>
* During {@link #rebalance(RoutingAllocation) re-balancing} the allocator takes
* shards from the <tt>most busy</tt> nodes and tries to relocate the shards to
* the least busy node until the number of shards per node are equal for all
* nodes in the cluster or until no shards can be relocated anymore.
* </p>
*/
public class EvenShardsCountAllocator extends AbstractComponent implements ShardsAllocator {
@Inject
public EvenShardsCountAllocator(Settings settings) {
super(settings);
}
@Override
public void applyStartedShards(StartedRerouteAllocation allocation) {
}
@Override
public void applyFailedShards(FailedRerouteAllocation allocation) {
}
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
boolean changed = false;
RoutingNodes routingNodes = allocation.routingNodes();
/*
* 1. order nodes by the number of shards allocated on them, least loaded first (this takes relocation into account)
*    i.e. if a shard is relocating, the target node's shard count is incremented.
* 2. iterate over the unassigned shards
* 2a. find the least busy node in the cluster that allows allocation for the current unassigned shard
* 2b. if a node is found add the shard to the node and remove it from the unassigned shards
* 3. iterate over the remaining unassigned shards and try to allocate them on next possible node
*/
// order nodes by number of shards (asc)
RoutingNode[] nodes = sortedNodesLeastToHigh(allocation);
Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
int lastNode = 0;
while (unassignedIterator.hasNext()) {
MutableShardRouting shard = unassignedIterator.next();
// do the allocation, finding the least "busy" node
for (int i = 0; i < nodes.length; i++) {
RoutingNode node = nodes[lastNode];
lastNode++;
if (lastNode == nodes.length) {
lastNode = 0;
}
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.YES) {
int numberOfShardsToAllocate = routingNodes.requiredAverageNumberOfShardsPerNode() - node.size();
if (numberOfShardsToAllocate <= 0) {
continue;
}
changed = true;
allocation.routingNodes().assign(shard, node.nodeId());
unassignedIterator.remove();
break;
}
}
}
// allocate all the unassigned shards above the average per node.
for (Iterator<MutableShardRouting> it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
MutableShardRouting shard = it.next();
// go over the nodes and try and allocate the remaining ones
for (RoutingNode routingNode : sortedNodesLeastToHigh(allocation)) {
Decision decision = allocation.deciders().canAllocate(shard, routingNode, allocation);
if (decision.type() == Decision.Type.YES) {
changed = true;
allocation.routingNodes().assign(shard, routingNode.nodeId());
it.remove();
break;
}
}
}
return changed;
}
@Override
public boolean rebalance(RoutingAllocation allocation) {
// take shards from busy nodes and move them to less busy nodes
boolean changed = false;
RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation);
if (sortedNodesLeastToHigh.length == 0) {
return false;
}
int lowIndex = 0;
int highIndex = sortedNodesLeastToHigh.length - 1;
boolean relocationPerformed;
do {
relocationPerformed = false;
while (lowIndex != highIndex) {
RoutingNode lowRoutingNode = sortedNodesLeastToHigh[lowIndex];
RoutingNode highRoutingNode = sortedNodesLeastToHigh[highIndex];
int averageNumOfShards = allocation.routingNodes().requiredAverageNumberOfShardsPerNode();
// only active shards can be removed, so count only active ones.
if (highRoutingNode.numberOfOwningShards() <= averageNumOfShards) {
highIndex--;
continue;
}
if (lowRoutingNode.size() >= averageNumOfShards) {
lowIndex++;
continue;
}
// Take a started shard from a "busy" node and move it to less busy node and go on
boolean relocated = false;
List<MutableShardRouting> startedShards = highRoutingNode.shardsWithState(STARTED);
for (MutableShardRouting startedShard : startedShards) {
Decision rebalanceDecision = allocation.deciders().canRebalance(startedShard, allocation);
if (rebalanceDecision.type() == Decision.Type.NO) {
continue;
}
Decision allocateDecision = allocation.deciders().canAllocate(startedShard, lowRoutingNode, allocation);
if (allocateDecision.type() == Decision.Type.YES) {
changed = true;
allocation.routingNodes().assign(new MutableShardRouting(startedShard.index(), startedShard.id(),
lowRoutingNode.nodeId(), startedShard.currentNodeId(), startedShard.restoreSource(),
startedShard.primary(), INITIALIZING, startedShard.version() + 1), lowRoutingNode.nodeId());
allocation.routingNodes().relocate(startedShard, lowRoutingNode.nodeId());
relocated = true;
relocationPerformed = true;
break;
}
}
if (!relocated) {
highIndex--;
}
}
} while (relocationPerformed);
return changed;
}
@Override
public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (!shardRouting.started()) {
return false;
}
boolean changed = false;
RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation);
if (sortedNodesLeastToHigh.length == 0) {
return false;
}
for (RoutingNode nodeToCheck : sortedNodesLeastToHigh) {
// check if it's the node we are moving from; no sense checking it
if (nodeToCheck.nodeId().equals(node.nodeId())) {
continue;
}
Decision decision = allocation.deciders().canAllocate(shardRouting, nodeToCheck, allocation);
if (decision.type() == Decision.Type.YES) {
allocation.routingNodes().assign(new MutableShardRouting(shardRouting.index(), shardRouting.id(),
nodeToCheck.nodeId(), shardRouting.currentNodeId(), shardRouting.restoreSource(),
shardRouting.primary(), INITIALIZING, shardRouting.version() + 1), nodeToCheck.nodeId());
allocation.routingNodes().relocate(shardRouting, nodeToCheck.nodeId());
changed = true;
break;
}
}
return changed;
}
private RoutingNode[] sortedNodesLeastToHigh(RoutingAllocation allocation) {
// create count per node id, taking into account relocations
final ObjectIntOpenHashMap<String> nodeCounts = new ObjectIntOpenHashMap<>();
for (RoutingNode node : allocation.routingNodes()) {
for (int i = 0; i < node.size(); i++) {
ShardRouting shardRouting = node.get(i);
String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
nodeCounts.addTo(nodeId, 1);
}
}
RoutingNode[] nodes = allocation.routingNodes().toArray();
Arrays.sort(nodes, new Comparator<RoutingNode>() {
@Override
public int compare(RoutingNode o1, RoutingNode o2) {
return nodeCounts.get(o1.nodeId()) - nodeCounts.get(o2.nodeId());
}
});
return nodes;
}
}

View File

@ -20,6 +20,8 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
@ -27,7 +29,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
 */
public class ShardsAllocatorModule extends AbstractModule {
-   public static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard";
+   private static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard";
    public static final String BALANCED_ALLOCATOR_KEY = "balanced"; // default
@ -37,13 +39,11 @@ public class ShardsAllocatorModule extends AbstractModule {
    private Class<? extends ShardsAllocator> shardsAllocator;
    public ShardsAllocatorModule(Settings settings) {
        this.settings = settings;
        shardsAllocator = loadShardsAllocator(settings);
    }
    @Override
    protected void configure() {
        if (shardsAllocator == null) {
@ -56,10 +56,13 @@ public class ShardsAllocatorModule extends AbstractModule {
    private Class<? extends ShardsAllocator> loadShardsAllocator(Settings settings) {
        final Class<? extends ShardsAllocator> shardsAllocator;
        final String type = settings.get(TYPE_KEY, BALANCED_ALLOCATOR_KEY);
        if (BALANCED_ALLOCATOR_KEY.equals(type)) {
            shardsAllocator = BalancedShardsAllocator.class;
        } else if (EVEN_SHARD_COUNT_ALLOCATOR_KEY.equals(type)) {
-           shardsAllocator = EvenShardsCountAllocator.class;
+           final ESLogger logger = Loggers.getLogger(getClass(), settings);
+           logger.warn("{} allocator has been removed in 2.0, using {} instead", EVEN_SHARD_COUNT_ALLOCATOR_KEY, BALANCED_ALLOCATOR_KEY);
+           shardsAllocator = BalancedShardsAllocator.class;
        } else {
            shardsAllocator = settings.getAsClass(TYPE_KEY, BalancedShardsAllocator.class,
                "org.elasticsearch.cluster.routing.allocation.allocator.", "Allocator");

View File

@ -20,7 +20,6 @@
package org.elasticsearch.cluster.service;
import com.google.common.collect.Iterables;
-import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ClusterState.Builder;
@ -59,6 +58,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
 */
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
+   public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold";
+   public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval";
    public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
    private final ThreadPool threadPool;
@ -74,6 +76,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
    private final TimeValue reconnectInterval;
+   private TimeValue slowTaskLoggingThreshold;
    private volatile PrioritizedEsThreadPoolExecutor updateTasksExecutor;
    /**
@ -115,8 +119,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
    this.clusterState = ClusterState.builder(clusterName).build();
    this.nodeSettingsService.setClusterService(this);
+   this.nodeSettingsService.addListener(new ApplySettings());
-   this.reconnectInterval = this.settings.getAsTime("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10));
+   this.reconnectInterval = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL, TimeValue.timeValueSeconds(10));
+   this.slowTaskLoggingThreshold = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, TimeValue.timeValueSeconds(30));
    localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
@ -371,22 +378,24 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        return;
    }
    ClusterState newClusterState;
+   long startTime = System.currentTimeMillis();
    try {
        newClusterState = updateTask.execute(previousClusterState);
    } catch (Throwable e) {
+       TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
        if (logger.isTraceEnabled()) {
-           StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
+           StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
            sb.append(previousClusterState.nodes().prettyPrint());
            sb.append(previousClusterState.routingTable().prettyPrint());
            sb.append(previousClusterState.readOnlyRoutingNodes().prettyPrint());
            logger.trace(sb.toString(), e);
        }
+       warnAboutSlowTaskIfNeeded(executionTime, source);
        updateTask.onFailure(source, e);
        return;
    }
    if (previousClusterState == newClusterState) {
-       logger.debug("processing [{}]: no change in cluster_state", source);
        if (updateTask instanceof AckedClusterStateUpdateTask) {
            //no need to wait for ack if nothing changed, the update can be counted as acknowledged
            ((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
@ -394,6 +403,9 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        if (updateTask instanceof ProcessedClusterStateUpdateTask) {
            ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
        }
+       TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+       logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
+       warnAboutSlowTaskIfNeeded(executionTime, source);
        return;
    }
@ -511,9 +523,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
    }
-   logger.debug("processing [{}]: done applying updated cluster_state (version: {}, uuid: {})", source, newClusterState.version(), newClusterState.uuid());
+   TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+   logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.uuid());
+   warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
+   TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
-   StringBuilder sb = new StringBuilder("failed to apply updated cluster state:\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.uuid()).append("], source [").append(source).append("]\n");
+   StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.uuid()).append("], source [").append(source).append("]\n");
    sb.append(newClusterState.nodes().prettyPrint());
    sb.append(newClusterState.routingTable().prettyPrint());
    sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
@ -523,6 +538,12 @@
            }
        }
    }
+   private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
+       if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) {
+           logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
+       }
+   }
    class NotifyTimeout implements Runnable {
        final TimeoutClusterStateListener listener;
        final TimeValue timeout;
@ -755,4 +776,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
            }
        }
    }
+   class ApplySettings implements NodeSettingsService.Listener {
+       @Override
+       public void onRefreshSettings(Settings settings) {
+           final TimeValue slowTaskLoggingThreshold = settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, InternalClusterService.this.slowTaskLoggingThreshold);
+           InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
+       }
+   }
}
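Since the new threshold is registered as a dynamic cluster setting in the ClusterDynamicSettingsModule change that follows, it can be adjusted at runtime. A minimal sketch using the Java client (`client` is assumed to be an existing org.elasticsearch.client.Client, and `ImmutableSettings.settingsBuilder()` is the 1.x-era builder, so treat the exact builder call as an assumption):

[source,java]
--------------------------------------------------
// Raise the slow-task warn threshold from the 30s default to 60s, cluster-wide.
client.admin().cluster().prepareUpdateSettings()
        .setTransientSettings(ImmutableSettings.settingsBuilder()
                .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "60s")
                .build())
        .get();
--------------------------------------------------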

View File

@ -25,11 +25,11 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.*;
+import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
-import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
@ -61,9 +61,6 @@ public class ClusterDynamicSettingsModule extends AbstractModule {
    clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*");
    clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*");
    clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*");
-   clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_SIZE);
-   clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_EXPIRE, Validator.TIME);
-   clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, Validator.POSITIVE_INTEGER);
    clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_TYPE);
    clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
    clusterDynamicSettings.addDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
@ -101,6 +98,7 @@ public class ClusterDynamicSettingsModule extends AbstractModule {
    clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
    clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
    clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
+   clusterDynamicSettings.addDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE);
}
public void addDynamicSettings(String... settings) {

View File

@ -0,0 +1,74 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import java.io.IOException;
import java.util.Objects;
/**
* Base implementation for a query which is cacheable at the index level but
* not at the segment level, as is usually expected.
*/
public abstract class IndexCacheableQuery extends Query {
private Object readerCacheKey;
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (reader.getCoreCacheKey() != this.readerCacheKey) {
IndexCacheableQuery rewritten = (IndexCacheableQuery) clone();
rewritten.readerCacheKey = reader.getCoreCacheKey();
return rewritten;
}
return super.rewrite(reader);
}
@Override
public boolean equals(Object obj) {
return super.equals(obj)
&& readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + Objects.hashCode(readerCacheKey);
}
@Override
public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
if (readerCacheKey == null) {
throw new IllegalStateException("Rewrite first");
}
if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) {
throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting");
}
return doCreateWeight(searcher, needsScores);
}
/** Create a {@link Weight} for this query.
* @see Query#createWeight(IndexSearcher, boolean)
*/
public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException;
}
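To make the contract concrete, here is a minimal, hypothetical subclass (all names are invented for illustration; it simply delegates to a match-all weight). The inherited rewrite() binds the query to the reader's core cache key, and createWeight() then enforces that the weight is created against that same reader:

[source,java]
--------------------------------------------------
// imports (org.apache.lucene.search.*, java.io.IOException) omitted for brevity
public final class MatchAllIndexCacheableQuery extends IndexCacheableQuery {
    @Override
    public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
        // a real implementation would delegate to its wrapped query
        return new MatchAllDocsQuery().createWeight(searcher, needsScores);
    }

    @Override
    public String toString(String field) {
        return "match_all_index_cacheable";
    }
}

// Usage sketch: rewrite against the searcher first, then create the weight.
// Query rewritten = searcher.rewrite(new MatchAllIndexCacheableQuery());
// Weight weight = rewritten.createWeight(searcher, false);
--------------------------------------------------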

View File

@ -0,0 +1,109 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import java.io.IOException;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;
/**
* A map between segment core cache keys and the shard that these segments
* belong to. This makes it possible to get the shard that a segment belongs to, or to get
* the entire set of live core cache keys for a given index. In order to work,
* this class needs to be notified about new segments. It modifies the current
* mappings as segments that were not known before are added, and prevents the
* structure from growing indefinitely by registering close listeners on these
* segments, so that at any time it only tracks live segments.
*
* NOTE: This is heavy. Avoid using this class unless absolutely required.
*/
public final class ShardCoreKeyMap {
private final Map<Object, ShardId> coreKeyToShard;
private final Multimap<String, Object> indexToCoreKey;
public ShardCoreKeyMap() {
coreKeyToShard = new IdentityHashMap<>();
indexToCoreKey = HashMultimap.create();
}
/**
* Register a {@link LeafReader}. This is necessary so that the core cache
* key of this reader can be found later using {@link #getCoreKeysForIndex(String)}.
*/
public void add(LeafReader reader) {
final ShardId shardId = ShardUtils.extractShardId(reader);
if (shardId == null) {
throw new IllegalArgumentException("Could not extract shard id from " + reader);
}
final Object coreKey = reader.getCoreCacheKey();
final String index = shardId.getIndex();
synchronized (this) {
if (coreKeyToShard.put(coreKey, shardId) == null) {
final boolean added = indexToCoreKey.put(index, coreKey);
assert added;
reader.addCoreClosedListener(new CoreClosedListener() {
@Override
public void onClose(Object ownerCoreCacheKey) throws IOException {
assert coreKey == ownerCoreCacheKey;
synchronized (ShardCoreKeyMap.this) {
coreKeyToShard.remove(ownerCoreCacheKey);
indexToCoreKey.remove(index, coreKey);
}
}
});
}
}
}
/**
* Return the {@link ShardId} that holds the given segment, or {@code null}
* if this segment is not tracked.
*/
public synchronized ShardId getShardId(Object coreKey) {
return coreKeyToShard.get(coreKey);
}
/**
* Get the set of core cache keys associated with the given index.
*/
public synchronized Set<Object> getCoreKeysForIndex(String index) {
return ImmutableSet.copyOf(indexToCoreKey.get(index));
}
/**
* Return the number of tracked segments.
*/
public synchronized int size() {
assert indexToCoreKey.size() == coreKeyToShard.size();
return coreKeyToShard.size();
}
}

View File

@ -22,6 +22,8 @@ package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
@ -104,32 +106,41 @@ public class DocIdSets {
    }
    /**
-    * Given a {@link DocIdSet}, return a {@link Bits} instance that will match
+    * Given a {@link Scorer}, return a {@link Bits} instance that will match
     * all documents contained in the set. Note that the returned {@link Bits}
-    * instance should only be consumed once and in order.
+    * instance MUST be consumed in order.
     */
-   public static Bits asSequentialAccessBits(final int maxDoc, @Nullable DocIdSet set) throws IOException {
-       if (set == null) {
+   public static Bits asSequentialAccessBits(final int maxDoc, @Nullable Scorer scorer) throws IOException {
+       if (scorer == null) {
            return new Bits.MatchNoBits(maxDoc);
        }
-       Bits bits = set.bits();
-       if (bits != null) {
-           return bits;
-       }
-       final DocIdSetIterator iterator = set.iterator();
-       if (iterator == null) {
-           return new Bits.MatchNoBits(maxDoc);
-       }
+       final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator();
+       final DocIdSetIterator iterator;
+       if (twoPhase == null) {
+           iterator = scorer;
+       } else {
+           iterator = twoPhase.approximation();
+       }
        return new Bits() {
-           int previous = 0;
+           int previous = -1;
+           boolean previousMatched = false;
            @Override
            public boolean get(int index) {
+               if (index < 0 || index >= maxDoc) {
+                   throw new IndexOutOfBoundsException(index + " is out of bounds: [" + 0 + "-" + maxDoc + "[");
+               }
                if (index < previous) {
                    throw new IllegalArgumentException("This Bits instance can only be consumed in order. "
                        + "Got called on [" + index + "] while previously called on [" + previous + "]");
                }
+               if (index == previous) {
+                   // we cache whether it matched because it is illegal to call
+                   // twoPhase.matches() twice
+                   return previousMatched;
+               }
                previous = index;
                int doc = iterator.docID();
@ -140,7 +151,14 @@ public class DocIdSets {
                    throw new IllegalStateException("Cannot advance iterator", e);
                }
            }
-           return index == doc;
+           if (index == doc) {
+               try {
+                   return previousMatched = twoPhase == null || twoPhase.matches();
+               } catch (IOException e) {
+                   throw new IllegalStateException("Cannot validate match", e);
+               }
+           }
+           return previousMatched = false;
        }
        @Override
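The FilteredCollector change that follows shows the real call site; as a standalone illustration, consuming the returned Bits in order looks roughly like this (a sketch: `filterWeight` and `leafContext` are assumed to be in scope):

[source,java]
--------------------------------------------------
Scorer filterScorer = filterWeight.scorer(leafContext, null); // may be null if nothing matches
Bits bits = DocIdSets.asSequentialAccessBits(leafContext.reader().maxDoc(), filterScorer);
for (int doc = 0; doc < leafContext.reader().maxDoc(); ++doc) {
    // get() must be called with non-decreasing doc ids; a repeated call on the
    // same doc is answered from the cached previousMatched flag.
    if (bits.get(doc)) {
        // doc matches the filter
    }
}
--------------------------------------------------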

View File

@ -1,32 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.search.Filter;
/**
* A marker indicating that this is a cached filter.
*/
public abstract class CachedFilter extends Filter {
public static boolean isCached(Filter filter) {
return filter instanceof CachedFilter;
}
}

View File

@ -31,18 +31,18 @@ import java.io.IOException;
public class FilteredCollector implements Collector {
    private final Collector collector;
-   private final Filter filter;
+   private final Weight filter;
-   public FilteredCollector(Collector collector, Filter filter) {
+   public FilteredCollector(Collector collector, Weight filter) {
        this.collector = collector;
        this.filter = filter;
    }
    @Override
    public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
-       final DocIdSet set = filter.getDocIdSet(context, null);
+       final Scorer filterScorer = filter.scorer(context, null);
        final LeafCollector in = collector.getLeafCollector(context);
-       final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), set);
+       final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
        return new FilterLeafCollector(in) {
            @Override

View File

@ -1,79 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import java.io.IOException;
/**
* A marker interface for {@link org.apache.lucene.search.Filter} denoting the filter
* as one that should not be cached, ever.
*/
public abstract class NoCacheFilter extends Filter {
private static final class NoCacheFilterWrapper extends NoCacheFilter {
private final Filter delegate;
private NoCacheFilterWrapper(Filter delegate) {
this.delegate = delegate;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return delegate.getDocIdSet(context, acceptDocs);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof NoCacheFilterWrapper) {
return delegate.equals(((NoCacheFilterWrapper)obj).delegate);
}
return false;
}
@Override
public String toString(String field) {
return "no_cache(" + delegate + ")";
}
}
/**
* Wraps a filter in a NoCacheFilter or returns it if it already is a NoCacheFilter.
*/
public static Filter wrap(Filter filter) {
if (filter instanceof NoCacheFilter) {
return filter;
}
return new NoCacheFilterWrapper(filter);
}
}

View File

@ -1,36 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.search.Query;
/**
* Queries are never cached directly, but a query can be wrapped in a filter that may end up being cached.
* Filters that wrap this query either directly or indirectly will never be cached.
*/
public abstract class NoCacheQuery extends Query {
@Override
public final String toString(String s) {
return "no_cache(" + innerToString(s) + ")";
}
public abstract String innerToString(String s);
}

src/main/java/org/elasticsearch/common/lucene/search/Queries.java

@@ -31,10 +31,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
-import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
 import java.util.List;
 import java.util.regex.Pattern;
@@ -54,19 +51,19 @@ public class Queries {
     }
     public static Filter newMatchAllFilter() {
-        return wrap(newMatchAllQuery());
+        return new QueryWrapperFilter(newMatchAllQuery());
     }
     public static Filter newMatchNoDocsFilter() {
-        return wrap(newMatchNoDocsQuery());
+        return new QueryWrapperFilter(newMatchNoDocsQuery());
     }
     public static Filter newNestedFilter() {
-        return wrap(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__"))));
+        return new QueryWrapperFilter(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__"))));
     }
     public static Filter newNonNestedFilter() {
-        return wrap(not(newNestedFilter()));
+        return new QueryWrapperFilter(not(newNestedFilter()));
     }
     /** Return a query that matches all documents but those that match the given query. */
@@ -169,24 +166,4 @@ public class Queries {
                 optionalClauseCount : (result < 0 ? 0 : result));
     }
-
-    /**
-     * Wraps a query in a filter.
-     *
-     * If a filter has an anti per segment execution / caching nature then @{@link CustomQueryWrappingFilter} is returned
-     * otherwise the standard {@link org.apache.lucene.search.QueryWrapperFilter} is returned.
-     */
-    @SuppressForbidden(reason = "QueryWrapperFilter cachability")
-    public static Filter wrap(Query query, QueryParseContext context) {
-        if ((context != null && context.requireCustomQueryWrappingFilter()) || CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) {
-            return new CustomQueryWrappingFilter(query);
-        } else {
-            return new QueryWrapperFilter(query);
-        }
-    }
-
-    /** Wrap as a {@link Filter}. */
-    public static Filter wrap(Query query) {
-        return wrap(query, null);
-    }
 }
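
A note on the removal above: the deleted Queries.wrap(...) helpers are replaced throughout this commit by direct construction of Lucene's QueryWrapperFilter. A minimal sketch of the substituted pattern (Lucene 5.x era API; the field and value here are made up for illustration):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.QueryWrapperFilter;
    import org.apache.lucene.search.TermQuery;

    public class WrapAsFilterSketch {
        // Any Query can stand in where a Filter is expected by wrapping it;
        // caching decisions now happen at the Weight level, not at wrap time.
        public static Filter typeFilter(String type) {
            return new QueryWrapperFilter(new TermQuery(new Term("_type", type)));
        }
    }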

src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java

@@ -19,9 +19,11 @@
 package org.elasticsearch.common.lucene.search;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
 import java.io.IOException;
@@ -46,4 +48,13 @@ public abstract class ResolvableFilter extends Filter {
             return null;
         }
     }
+
+    @Override
+    public Query rewrite(IndexReader reader) throws IOException {
+        final Filter resolved = resolve();
+        if (resolved != null) {
+            return resolved;
+        }
+        return super.rewrite(reader);
+    }
 }
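
The rewrite(IndexReader) override added above lets a placeholder filter replace itself with its resolved form before Lucene executes or caches it. A hypothetical subclass, purely for illustration (the class and its bind() method are invented here, not part of the commit):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.QueryWrapperFilter;
    import org.apache.lucene.search.TermQuery;
    import org.elasticsearch.common.lucene.search.ResolvableFilter;

    public class LazyTypeFilter extends ResolvableFilter {
        private volatile Filter resolved;

        // Called once the target type is known; until then resolve() returns
        // null and execution falls back to the default behavior.
        public void bind(String type) {
            resolved = new QueryWrapperFilter(new TermQuery(new Term("_type", type)));
        }

        @Override
        public Filter resolve() {
            return resolved;
        }

        @Override
        public String toString(String field) {
            return "lazy_type(" + resolved + ")";
        }
    }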

src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java

@@ -119,16 +119,22 @@ public class FiltersFunctionScoreQuery extends Query {
         // TODO: needsScores
         // if we dont need scores, just return the underlying Weight?
         Weight subQueryWeight = subQuery.createWeight(searcher, needsScores);
-        return new CustomBoostFactorWeight(this, subQueryWeight);
+        Weight[] filterWeights = new Weight[filterFunctions.length];
+        for (int i = 0; i < filterFunctions.length; ++i) {
+            filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false);
+        }
+        return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights);
     }
     class CustomBoostFactorWeight extends Weight {
         final Weight subQueryWeight;
+        final Weight[] filterWeights;
-        public CustomBoostFactorWeight(Query parent, Weight subQueryWeight) throws IOException {
+        public CustomBoostFactorWeight(Query parent, Weight subQueryWeight, Weight[] filterWeights) throws IOException {
             super(parent);
             this.subQueryWeight = subQueryWeight;
+            this.filterWeights = filterWeights;
         }
         @Override
@@ -162,7 +168,8 @@ public class FiltersFunctionScoreQuery extends Query {
             for (int i = 0; i < filterFunctions.length; i++) {
                 FilterFunction filterFunction = filterFunctions[i];
                 functions[i] = filterFunction.function.getLeafScoreFunction(context);
-                docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterFunction.filter.getDocIdSet(context, acceptDocs));
+                Scorer filterScorer = filterWeights[i].scorer(context, null); // no need to apply accepted docs
+                docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
             }
             return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore);
         }
@@ -177,7 +184,8 @@ public class FiltersFunctionScoreQuery extends Query {
             // First: Gather explanations for all filters
             List<Explanation> filterExplanations = new ArrayList<>();
             float weightSum = 0;
-            for (FilterFunction filterFunction : filterFunctions) {
+            for (int i = 0; i < filterFunctions.length; ++i) {
+                FilterFunction filterFunction = filterFunctions[i];
                 if (filterFunction.function instanceof WeightFactorFunction) {
                     weightSum += ((WeightFactorFunction) filterFunction.function).getWeight();
@@ -186,7 +194,7 @@ public class FiltersFunctionScoreQuery extends Query {
                 }
                 Bits docSet = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(),
-                        filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs()));
+                        filterWeights[i].scorer(context, null));
                 if (docSet.get(doc)) {
                     Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl);
                     double factor = functionExplanation.getValue();
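
The hunks above replace direct Filter.getDocIdSet(...) calls with Weights that are created once per search and turned into per-segment Scorers, which DocIdSets.asSequentialAccessBits then exposes as a lazily advanced Bits view. The pattern in isolation, as a sketch (it assumes the two-argument Weight.scorer(LeafReaderContext, Bits) of the Lucene version this diff compiles against, as used in the hunk itself):

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.util.Bits;
    import org.elasticsearch.common.lucene.docset.DocIdSets;

    public class FilterBitsSketch {
        public static Bits filterBits(IndexSearcher searcher, Query filter,
                                      LeafReaderContext leaf) throws IOException {
            // needsScores = false: only matching matters for a filter
            Weight weight = searcher.createNormalizedWeight(filter, false);
            // null acceptDocs: deletions are applied by the caller, not here
            Scorer scorer = weight.scorer(leaf, null);
            return DocIdSets.asSequentialAccessBits(leaf.reader().maxDoc(), scorer);
        }
    }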

src/main/java/org/elasticsearch/index/IndexService.java

@@ -22,6 +22,7 @@ package org.elasticsearch.index;
 import com.google.common.base.Function;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterators;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -146,7 +147,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
         this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class);
         // inject workarounds for cyclic dep
-        indexCache.filter().setIndexService(this);
         indexFieldData.setIndexService(this);
         bitSetFilterCache.setIndexService(this);
         this.nodeEnv = nodeEnv;

src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java

@@ -22,10 +22,10 @@ package org.elasticsearch.index.aliases;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.compress.CompressedString;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -109,7 +109,7 @@ public class IndexAliasesService extends AbstractIndexComponent implements Itera
                 return null;
             }
         }
-        return Queries.wrap(combined);
+        return new QueryWrapperFilter(combined);
     }
 }

src/main/java/org/elasticsearch/index/cache/IndexCache.java

@@ -19,6 +19,7 @@
 package org.elasticsearch.index.cache;
+import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -37,12 +38,14 @@ import java.io.IOException;
 public class IndexCache extends AbstractIndexComponent implements Closeable {
     private final FilterCache filterCache;
+    private final QueryCachingPolicy filterCachingPolicy;
     private final BitsetFilterCache bitsetFilterCache;
     @Inject
-    public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, BitsetFilterCache bitsetFilterCache) {
+    public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryCachingPolicy filterCachingPolicy, BitsetFilterCache bitsetFilterCache) {
         super(index, indexSettings);
         this.filterCache = filterCache;
+        this.filterCachingPolicy = filterCachingPolicy;
         this.bitsetFilterCache = bitsetFilterCache;
     }
@@ -50,6 +53,10 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {
         return filterCache;
     }
+
+    public QueryCachingPolicy filterPolicy() {
+        return filterCachingPolicy;
+    }
     /**
      * Return the {@link BitsetFilterCache} for this index.
      */
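
With filterPolicy() exposed next to filter(), a consumer can decorate any filter's Weight with the index's cache and policy. A hedged sketch of that call pattern (the helper class is made up; only the two accessors and doCache come from this diff):

    import org.apache.lucene.search.Weight;
    import org.elasticsearch.index.cache.IndexCache;

    public class CachedWeightSketch {
        // The cache decides per segment whether to store the filter's matches.
        public static Weight maybeCache(IndexCache indexCache, Weight weight) {
            return indexCache.filter().doCache(weight, indexCache.filterPolicy());
        }
    }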

src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java

@@ -36,7 +36,6 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.search.NoCacheFilter;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
@@ -105,7 +104,6 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
     public BitDocIdSetFilter getBitDocIdSetFilter(Filter filter) {
         assert filter != null;
-        assert !(filter instanceof NoCacheFilter);
         return new BitDocIdSetFilterWrapper(filter);
     }

src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java

@@ -19,19 +19,14 @@
 package org.elasticsearch.index.cache.filter;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.index.IndexComponent;
-import org.elasticsearch.index.IndexService;
 import java.io.Closeable;
 /**
  *
  */
-public interface FilterCache extends IndexComponent, Closeable {
+public interface FilterCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache {
     static class EntriesStats {
         public final long sizeInBytes;
@@ -43,16 +38,5 @@ public interface FilterCache extends IndexComponent, Closeable {
         }
     }
-    // we need to "inject" the index service to not create cyclic dep
-    void setIndexService(IndexService indexService);
-    String type();
-    Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy);
-    void clear(Object reader);
     void clear(String reason);
-    void clear(String reason, String[] keys);
 }
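
FilterCache is now a Lucene QueryCache, whose entire contract is the single doCache method: it decorates a Weight, and the QueryCachingPolicy decides per segment whether caching is worthwhile. A pass-through implementation illustrating the shape (equivalent in spirit to the NoneFilterCache further down in this diff):

    import org.apache.lucene.search.QueryCache;
    import org.apache.lucene.search.QueryCachingPolicy;
    import org.apache.lucene.search.Weight;

    public class PassThroughQueryCache implements QueryCache {
        @Override
        public Weight doCache(Weight weight, QueryCachingPolicy policy) {
            return weight; // cache nothing: execute the weight as-is
        }
    }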

src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java

@@ -24,7 +24,7 @@ import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Scopes;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
 /**
  *
@@ -46,7 +46,7 @@ public class FilterCacheModule extends AbstractModule {
     @Override
     protected void configure() {
         bind(FilterCache.class)
-                .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache"))
+                .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache"))
                 .in(Scopes.SINGLETON);
         // the filter cache is a node-level thing, however we want the most popular filters
         // to be computed on a per-index basis, that is why we don't use the SINGLETON

src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java

@@ -33,32 +33,79 @@
  */
 public class FilterCacheStats implements Streamable, ToXContent {
-    long memorySize;
-    long evictions;
+    long ramBytesUsed;
+    long hitCount;
+    long missCount;
+    long cacheCount;
+    long cacheSize;
     public FilterCacheStats() {
     }
-    public FilterCacheStats(long memorySize, long evictions) {
-        this.memorySize = memorySize;
-        this.evictions = evictions;
+    public FilterCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) {
+        this.ramBytesUsed = ramBytesUsed;
+        this.hitCount = hitCount;
+        this.missCount = missCount;
+        this.cacheCount = cacheCount;
+        this.cacheSize = cacheSize;
     }
     public void add(FilterCacheStats stats) {
-        this.memorySize += stats.memorySize;
-        this.evictions += stats.evictions;
+        ramBytesUsed += stats.ramBytesUsed;
+        hitCount += stats.hitCount;
+        missCount += stats.missCount;
+        cacheCount += stats.cacheCount;
+        cacheSize += stats.cacheSize;
     }
     public long getMemorySizeInBytes() {
-        return this.memorySize;
+        return ramBytesUsed;
     }
     public ByteSizeValue getMemorySize() {
-        return new ByteSizeValue(memorySize);
+        return new ByteSizeValue(ramBytesUsed);
     }
+    /**
+     * The total number of lookups in the cache.
+     */
+    public long getTotalCount() {
+        return hitCount + missCount;
+    }
+    /**
+     * The number of successful lookups in the cache.
+     */
+    public long getHitCount() {
+        return hitCount;
+    }
+    /**
+     * The number of lookups in the cache that failed to retrieve a {@link DocIdSet}.
+     */
+    public long getMissCount() {
+        return missCount;
+    }
+    /**
+     * The number of {@link DocIdSet}s that have been cached.
+     */
+    public long getCacheCount() {
+        return cacheCount;
+    }
+    /**
+     * The number of {@link DocIdSet}s that are in the cache.
+     */
+    public long getCacheSize() {
+        return cacheSize;
+    }
+    /**
+     * The number of {@link DocIdSet}s that have been evicted from the cache.
+     */
     public long getEvictions() {
-        return this.evictions;
+        return cacheCount - cacheSize;
     }
     public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException {
@@ -67,22 +114,34 @@ public class FilterCacheStats implements Streamable, ToXContent {
         return stats;
     }
     @Override
     public void readFrom(StreamInput in) throws IOException {
-        memorySize = in.readVLong();
-        evictions = in.readVLong();
+        ramBytesUsed = in.readLong();
+        hitCount = in.readLong();
+        missCount = in.readLong();
+        cacheCount = in.readLong();
+        cacheSize = in.readLong();
     }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVLong(memorySize);
-        out.writeVLong(evictions);
+        out.writeLong(ramBytesUsed);
+        out.writeLong(hitCount);
+        out.writeLong(missCount);
+        out.writeLong(cacheCount);
+        out.writeLong(cacheSize);
     }
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
         builder.startObject(Fields.FILTER_CACHE);
-        builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
+        builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed);
+        builder.field(Fields.TOTAL_COUNT, getTotalCount());
+        builder.field(Fields.HIT_COUNT, getHitCount());
+        builder.field(Fields.MISS_COUNT, getMissCount());
+        builder.field(Fields.CACHE_SIZE, getCacheSize());
+        builder.field(Fields.CACHE_COUNT, getCacheCount());
         builder.field(Fields.EVICTIONS, getEvictions());
         builder.endObject();
         return builder;
@@ -92,6 +151,12 @@ public class FilterCacheStats implements Streamable, ToXContent {
         static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache");
         static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
         static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
+        static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+        static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count");
+        static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count");
+        static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size");
+        static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count");
         static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
     }
 }
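
Evictions are now derived rather than counted directly: every DocIdSet ever cached (cacheCount) is either still resident (cacheSize) or must have been evicted. A worked example against the constructor above (the values are invented):

    import org.elasticsearch.index.cache.filter.FilterCacheStats;

    public class FilterCacheStatsExample {
        public static void main(String[] args) {
            // ramBytesUsed, hitCount, missCount, cacheCount, cacheSize
            FilterCacheStats stats = new FilterCacheStats(1024 * 1024, 900, 100, 100, 80);
            System.out.println(stats.getTotalCount()); // 1000 lookups in total
            System.out.println(stats.getEvictions());  // 100 - 80 = 20 evicted sets
        }
    }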

src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java

@@ -19,45 +19,35 @@
 package org.elasticsearch.index.cache.filter;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.lucene.search.DocIdSet;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.docset.DocIdSets;
-import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import java.io.Closeable;
+import java.io.IOException;
 /**
  */
-public class ShardFilterCache extends AbstractIndexShardComponent implements RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> {
-    final CounterMetric evictionsMetric = new CounterMetric();
-    final CounterMetric totalMetric = new CounterMetric();
+public class ShardFilterCache extends AbstractIndexShardComponent implements Closeable {
+    final IndicesFilterCache cache;
     @Inject
-    public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings) {
+    public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings, IndicesFilterCache cache) {
         super(shardId, indexSettings);
+        this.cache = cache;
     }
     public FilterCacheStats stats() {
-        return new FilterCacheStats(totalMetric.count(), evictionsMetric.count());
-    }
-    public void onCached(long sizeInBytes) {
-        totalMetric.inc(sizeInBytes);
+        return cache.getStats(shardId);
     }
     @Override
-    public void onRemoval(RemovalNotification<WeightedFilterCache.FilterCacheKey, DocIdSet> removalNotification) {
-        if (removalNotification.wasEvicted()) {
-            evictionsMetric.inc();
-        }
-        if (removalNotification.getValue() != null) {
-            totalMetric.dec(DocIdSets.sizeInBytes(removalNotification.getValue()));
-        }
+    public void close() throws IOException {
+        cache.onClose(shardId);
     }
 }

src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java

@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.filter.index;
+
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.Weight;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+
+/**
+ * The index-level filter cache. This class mostly delegates to the node-level
+ * filter cache: {@link IndicesFilterCache}.
+ */
+public class IndexFilterCache extends AbstractIndexComponent implements FilterCache {
+
+    final IndicesFilterCache indicesFilterCache;
+
+    @Inject
+    public IndexFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) {
+        super(index, indexSettings);
+        this.indicesFilterCache = indicesFilterCache;
+    }
+
+    @Override
+    public void close() throws ElasticsearchException {
+        clear("close");
+    }
+
+    @Override
+    public void clear(String reason) {
+        logger.debug("full cache clear, reason [{}]", reason);
+        indicesFilterCache.clearIndex(index.getName());
+    }
+
+    @Override
+    public Weight doCache(Weight weight, QueryCachingPolicy policy) {
+        return indicesFilterCache.doCache(weight, policy);
+    }
+}

src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java

@@ -19,15 +19,12 @@
 package org.elasticsearch.index.cache.filter.none;
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.QueryCachingPolicy;
-import org.elasticsearch.common.Nullable;
+import org.apache.lucene.search.Weight;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.filter.FilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
@@ -42,38 +39,18 @@ public class NoneFilterCache extends AbstractIndexComponent implements FilterCac
         logger.debug("Using no filter cache");
     }
-    @Override
-    public void setIndexService(IndexService indexService) {
-        // nothing to do here...
-    }
-    @Override
-    public String type() {
-        return "none";
-    }
     @Override
     public void close() {
         // nothing to do here
     }
     @Override
-    public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy) {
-        return filterToCache;
+    public Weight doCache(Weight weight, QueryCachingPolicy policy) {
+        return weight;
     }
     @Override
     public void clear(String reason) {
         // nothing to do here
     }
-    @Override
-    public void clear(String reason, String[] keys) {
-        // nothing to do there
-    }
-    @Override
-    public void clear(Object reader) {
-        // nothing to do here
-    }
 }

src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java

@@ -1,277 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.cache.filter.weighted;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.Weigher;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SegmentReader;
-import org.apache.lucene.search.BitsFilteredDocIdSet;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
-import org.apache.lucene.util.Bits;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
-import org.elasticsearch.common.lucene.docset.DocIdSets;
-import org.elasticsearch.common.lucene.search.CachedFilter;
-import org.elasticsearch.common.lucene.search.NoCacheFilter;
-import org.elasticsearch.common.lucene.search.ResolvableFilter;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.cache.filter.FilterCache;
-import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardUtils;
-import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
-
-import java.io.IOException;
-import java.util.concurrent.ConcurrentMap;
-
-public class WeightedFilterCache extends AbstractIndexComponent implements FilterCache, SegmentReader.CoreClosedListener, IndexReader.ReaderClosedListener {
-
-    final IndicesFilterCache indicesFilterCache;
-    IndexService indexService;
-
-    final ConcurrentMap<Object, Boolean> seenReaders = ConcurrentCollections.newConcurrentMap();
-
-    @Inject
-    public WeightedFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) {
-        super(index, indexSettings);
-        this.indicesFilterCache = indicesFilterCache;
-    }
-
-    @Override
-    public void setIndexService(IndexService indexService) {
-        this.indexService = indexService;
-    }
-
-    @Override
-    public String type() {
-        return "weighted";
-    }
-
-    @Override
-    public void close() {
-        clear("close");
-    }
-
-    @Override
-    public void onClose(IndexReader reader) {
-        clear(reader.getCoreCacheKey());
-    }
-
-    @Override
-    public void clear(String reason) {
-        logger.debug("full cache clear, reason [{}]", reason);
-        for (Object readerKey : seenReaders.keySet()) {
-            Boolean removed = seenReaders.remove(readerKey);
-            if (removed == null) {
-                return;
-            }
-            indicesFilterCache.addReaderKeyToClean(readerKey);
-        }
-    }
-
-    @Override
-    public void clear(String reason, String[] keys) {
-        logger.debug("clear keys [], reason [{}]", reason, keys);
-        for (String key : keys) {
-            final HashedBytesRef keyBytes = new HashedBytesRef(key);
-            for (Object readerKey : seenReaders.keySet()) {
-                indicesFilterCache.cache().invalidate(new FilterCacheKey(readerKey, keyBytes));
-            }
-        }
-    }
-
-    @Override
-    public void onClose(Object coreKey) {
-        clear(coreKey);
-    }
-
-    @Override
-    public void clear(Object coreCacheKey) {
-        // we add the seen reader before we add the first cache entry for this reader
-        // so, if we don't see it here, its won't be in the cache
-        Boolean removed = seenReaders.remove(coreCacheKey);
-        if (removed == null) {
-            return;
-        }
-        indicesFilterCache.addReaderKeyToClean(coreCacheKey);
-    }
-
-    @Override
-    public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy cachePolicy) {
-        if (filterToCache == null) {
-            return null;
-        }
-        if (filterToCache instanceof NoCacheFilter) {
-            return filterToCache;
-        }
-        if (CachedFilter.isCached(filterToCache)) {
-            return filterToCache;
-        }
-        if (filterToCache instanceof ResolvableFilter) {
-            throw new IllegalArgumentException("Cannot cache instances of ResolvableFilter: " + filterToCache);
-        }
-        return new FilterCacheFilterWrapper(filterToCache, cacheKey, cachePolicy, this);
-    }
-
-    static class FilterCacheFilterWrapper extends CachedFilter {
-
-        private final Filter filter;
-        private final Object filterCacheKey;
-        private final QueryCachingPolicy cachePolicy;
-        private final WeightedFilterCache cache;
-
-        FilterCacheFilterWrapper(Filter filter, Object cacheKey, QueryCachingPolicy cachePolicy, WeightedFilterCache cache) {
-            this.filter = filter;
-            this.filterCacheKey = cacheKey != null ? cacheKey : filter;
-            this.cachePolicy = cachePolicy;
-            this.cache = cache;
-        }
-
-        @Override
-        public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
-            if (context.ord == 0) {
-                cachePolicy.onUse(filter);
-            }
-            FilterCacheKey cacheKey = new FilterCacheKey(context.reader().getCoreCacheKey(), filterCacheKey);
-            Cache<FilterCacheKey, DocIdSet> innerCache = cache.indicesFilterCache.cache();
-
-            DocIdSet cacheValue = innerCache.getIfPresent(cacheKey);
-            final DocIdSet ret;
-            if (cacheValue != null) {
-                ret = cacheValue;
-            } else {
-                final DocIdSet uncached = filter.getDocIdSet(context, null);
-                if (cachePolicy.shouldCache(filter, context)) {
-                    if (!cache.seenReaders.containsKey(context.reader().getCoreCacheKey())) {
-                        Boolean previous = cache.seenReaders.putIfAbsent(context.reader().getCoreCacheKey(), Boolean.TRUE);
-                        if (previous == null) {
-                            // we add a core closed listener only, for non core IndexReaders we rely on clear being called (percolator for example)
-                            context.reader().addCoreClosedListener(cache);
-                        }
-                    }
-                    // we can't pass down acceptedDocs provided, because we are caching the result, and acceptedDocs
-                    // might be specific to a query. We don't pass the live docs either because a cache built for a specific
-                    // generation of a segment might be reused by an older generation which has fewer deleted documents
-                    cacheValue = DocIdSets.toCacheable(context.reader(), uncached);
-                    // we might put the same one concurrently, that's fine, it will be replaced and the removal
-                    // will be called
-                    ShardId shardId = ShardUtils.extractShardId(context.reader());
-                    if (shardId != null) {
-                        IndexShard shard = cache.indexService.shard(shardId.id());
-                        if (shard != null) {
-                            cacheKey.removalListener = shard.filterCache();
-                            shard.filterCache().onCached(DocIdSets.sizeInBytes(cacheValue));
-                        }
-                    }
-                    innerCache.put(cacheKey, cacheValue);
-                    ret = cacheValue;
-                } else {
-                    // uncached
-                    ret = uncached;
-                }
-            }
-
-            return BitsFilteredDocIdSet.wrap(DocIdSets.isEmpty(ret) ? null : ret, acceptDocs);
-        }
-
-        @Override
-        public String toString(String field) {
-            return "cache(" + filter + ")";
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (super.equals(o) == false) return false;
-            return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
-        }
-
-        @Override
-        public int hashCode() {
-            return 31 * super.hashCode() + filter.hashCode();
-        }
-    }
-
-    /** A weigher for the Guava filter cache that uses a minimum entry size */
-    public static class FilterCacheValueWeigher implements Weigher<WeightedFilterCache.FilterCacheKey, DocIdSet> {
-
-        private final int minimumEntrySize;
-
-        public FilterCacheValueWeigher(int minimumEntrySize) {
-            this.minimumEntrySize = minimumEntrySize;
-        }
-
-        @Override
-        public int weigh(FilterCacheKey key, DocIdSet value) {
-            int weight = (int) Math.min(DocIdSets.sizeInBytes(value), Integer.MAX_VALUE);
-            return Math.max(weight, this.minimumEntrySize);
-        }
-    }
-
-    public static class FilterCacheKey {
-        private final Object readerKey;
-        private final Object filterKey;
-
-        // if we know, we will try and set the removal listener (for statistics)
-        // its ok that its not volatile because we make sure we only set it when the object is created before its shared between threads
-        @Nullable
-        public RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> removalListener;
-
-        public FilterCacheKey(Object readerKey, Object filterKey) {
-            this.readerKey = readerKey;
-            this.filterKey = filterKey;
-        }
-
-        public Object readerKey() {
-            return readerKey;
-        }
-
-        public Object filterKey() {
-            return filterKey;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            // if (o == null || getClass() != o.getClass()) return false;
-            FilterCacheKey that = (FilterCacheKey) o;
-            return (readerKey().equals(that.readerKey()) && filterKey.equals(that.filterKey));
-        }
-
-        @Override
-        public int hashCode() {
-            return readerKey().hashCode() + 31 * filterKey.hashCode();
-        }
-    }
-}

src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java

@@ -21,14 +21,10 @@ package org.elasticsearch.index.cache.query;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
-import org.apache.lucene.search.DocIdSet;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.docset.DocIdSets;
 import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.cache.filter.FilterCacheStats;
-import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;

src/main/java/org/elasticsearch/index/engine/EngineConfig.java

@@ -21,6 +21,8 @@ package org.elasticsearch.index.engine;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.QueryCache;
+import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.similarities.Similarity;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
@@ -73,6 +75,8 @@ public final class EngineConfig {
     private final CodecService codecService;
     private final Engine.FailedEngineListener failedEngineListener;
     private final boolean ignoreUnknownTranslog;
+    private final QueryCache filterCache;
+    private final QueryCachingPolicy filterCachingPolicy;
     /**
      * Index setting for index concurrency / number of threadstates in the indexwriter.
@@ -134,7 +138,11 @@ public final class EngineConfig {
     /**
      * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
     */
-    public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, TranslogRecoveryPerformer translogRecoveryPerformer) {
+    public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService,
+                        IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
+                        MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer,
+                        Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
+                        TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy) {
         this.shardId = shardId;
         this.threadPool = threadPool;
         this.indexingService = indexingService;
@@ -159,6 +167,8 @@ public final class EngineConfig {
         updateVersionMapSize();
         this.translogRecoveryPerformer = translogRecoveryPerformer;
         this.ignoreUnknownTranslog = indexSettings.getAsBoolean(INDEX_IGNORE_UNKNOWN_TRANSLOG, false);
+        this.filterCache = filterCache;
+        this.filterCachingPolicy = filterCachingPolicy;
     }
     /** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */
@@ -397,4 +407,18 @@ public final class EngineConfig {
     public TranslogRecoveryPerformer getTranslogRecoveryPerformer() {
         return translogRecoveryPerformer;
     }
+
+    /**
+     * Return the cache to use for filters.
+     */
+    public QueryCache getFilterCache() {
+        return filterCache;
+    }
+
+    /**
+     * Return the policy to use when caching filters.
+     */
+    public QueryCachingPolicy getFilterCachingPolicy() {
+        return filterCachingPolicy;
+    }
 }

src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java

@@ -40,7 +40,9 @@ public class EngineSearcherFactory extends SearcherFactory {
     @Override
     public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
-        IndexSearcher searcher = new IndexSearcher(reader);
+        IndexSearcher searcher = super.newSearcher(reader, previousReader);
+        searcher.setQueryCache(engineConfig.getFilterCache());
+        searcher.setQueryCachingPolicy(engineConfig.getFilterCachingPolicy());
         searcher.setSimilarity(engineConfig.getSimilarity());
         return searcher;
     }
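
The factory change above means every searcher the engine hands out shares the engine-level filter cache and caching policy instead of Lucene's defaults. Sketched standalone (UsageTrackingQueryCachingPolicy is the policy FilterCacheModule imports earlier in this diff; constructing it inline here is only for illustration):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.QueryCache;
    import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;

    public class SearcherWiringSketch {
        public static IndexSearcher newSearcher(IndexReader reader, QueryCache cache) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Filters are cached through the shared cache, and only when the
            // policy decides a filter is reused often enough to pay off.
            searcher.setQueryCache(cache);
            searcher.setQueryCachingPolicy(new UsageTrackingQueryCachingPolicy());
            return searcher;
        }
    }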

src/main/java/org/elasticsearch/index/engine/InternalEngine.java

@@ -1006,7 +1006,7 @@ public class InternalEngine extends Engine {
             try {
                 assert isMergedSegment(reader);
                 if (warmer != null) {
-                    final Engine.Searcher searcher = new Searcher("warmer", new IndexSearcher(reader));
+                    final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(reader, null));
                     final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
                     warmer.warmNewReaders(context);
                 }
@@ -1039,8 +1039,7 @@ public class InternalEngine extends Engine {
         @Override
         public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
-            IndexSearcher searcher = new IndexSearcher(reader);
-            searcher.setSimilarity(engineConfig.getSimilarity());
+            IndexSearcher searcher = super.newSearcher(reader, previousReader);
             if (warmer != null) {
                 // we need to pass a custom searcher that does not release anything on Engine.Search Release,
                 // we will release explicitly
@@ -1072,7 +1071,8 @@ public class InternalEngine extends Engine {
                 }
                 if (!readers.isEmpty()) {
                     // we don't want to close the inner readers, just increase ref on them
-                    newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false));
+                    IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
+                    newSearcher = super.newSearcher(newReader, null);
                     closeNewSearcher = true;
                 }
             }

src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSet;
@@ -43,7 +44,19 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.mapper.Mapping.SourceTransform;
-import org.elasticsearch.index.mapper.internal.*;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.internal.SizeFieldMapper;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.mapper.object.RootObjectMapper;
 import org.elasticsearch.script.ExecutableScript;
@@ -54,7 +67,12 @@ import org.elasticsearch.script.ScriptService.ScriptType;
 import org.elasticsearch.search.internal.SearchContext;
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CopyOnWriteArrayList;
 /**
@@ -343,7 +361,7 @@ public class DocumentMapper implements ToXContent {
                 continue;
             }
-            Filter filter = sc.filterCache().cache(objectMapper.nestedTypeFilter(), null, sc.queryParserService().autoFilterCachePolicy());
+            Filter filter = objectMapper.nestedTypeFilter();
             if (filter == null) {
                 continue;
             }

src/main/java/org/elasticsearch/index/mapper/MapperService.java

@@ -20,13 +20,11 @@
package org.elasticsearch.index.mapper; package org.elasticsearch.index.mapper;
import com.carrotsearch.hppc.ObjectOpenHashSet; import com.carrotsearch.hppc.ObjectOpenHashSet;
import com.google.common.base.Charsets;
import com.google.common.base.Predicate; import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators; import com.google.common.collect.Iterators;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
@ -36,6 +34,7 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Filter; import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchGenerationException;
@ -44,14 +43,9 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.FailedToResolveConfigException;
import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.analysis.AnalysisService;
@ -67,8 +61,6 @@ import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import java.io.IOException; import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
@ -122,7 +114,7 @@ public class MapperService extends AbstractIndexComponent {
private volatile ImmutableMap<String, FieldMapper<?>> unmappedFieldMappers = ImmutableMap.of(); private volatile ImmutableMap<String, FieldMapper<?>> unmappedFieldMappers = ImmutableMap.of();
@Inject @Inject
public MapperService(Index index, @IndexSettings Settings indexSettings, Environment environment, AnalysisService analysisService, IndexFieldDataService fieldDataService, public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService, IndexFieldDataService fieldDataService,
SimilarityLookupService similarityLookupService, SimilarityLookupService similarityLookupService,
ScriptService scriptService) { ScriptService scriptService) {
super(index, indexSettings); super(index, indexSettings);
@ -134,107 +126,36 @@ public class MapperService extends AbstractIndexComponent {
this.searchQuoteAnalyzer = new SmartIndexNameSearchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer()); this.searchQuoteAnalyzer = new SmartIndexNameSearchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer());
this.dynamic = indexSettings.getAsBoolean("index.mapper.dynamic", true); this.dynamic = indexSettings.getAsBoolean("index.mapper.dynamic", true);
String defaultMappingLocation = indexSettings.get("index.mapper.default_mapping_location"); defaultPercolatorMappingSource = "{\n" +
final URL defaultMappingUrl; "\"_default_\":{\n" +
"\"properties\" : {\n" +
"\"query\" : {\n" +
"\"type\" : \"object\",\n" +
"\"enabled\" : false\n" +
"}\n" +
"}\n" +
"}\n" +
"}";
if (index.getName().equals(ScriptService.SCRIPT_INDEX)){ if (index.getName().equals(ScriptService.SCRIPT_INDEX)){
defaultMappingUrl = getMappingUrl(indexSettings, environment, defaultMappingLocation, "script-mapping.json", "org/elasticsearch/index/mapper/script-mapping.json"); defaultMappingSource = "{" +
} else { "\"_default_\": {" +
defaultMappingUrl = getMappingUrl(indexSettings, environment, defaultMappingLocation, "default-mapping.json", "org/elasticsearch/index/mapper/default-mapping.json"); "\"properties\": {" +
}
if (defaultMappingUrl == null) {
logger.info("failed to find default-mapping.json in the classpath, using the default template");
if (index.getName().equals(ScriptService.SCRIPT_INDEX)){
defaultMappingSource = "{" +
"\"_default_\": {" +
"\"properties\": {" +
"\"script\": { \"enabled\": false }," + "\"script\": { \"enabled\": false }," +
"\"template\": { \"enabled\": false }" + "\"template\": { \"enabled\": false }" +
"}" + "}" +
"}" + "}" +
"}"; "}";
} else {
defaultMappingSource = "{\n" +
" \"_default_\":{\n" +
" }\n" +
"}";
}
} else { } else {
try { defaultMappingSource = "{\"_default_\":{}}";
defaultMappingSource = Streams.copyToString(FileSystemUtils.newBufferedReader(defaultMappingUrl, Charsets.UTF_8));
} catch (IOException e) {
throw new MapperException("Failed to load default mapping source from [" + defaultMappingLocation + "]", e);
}
}
String percolatorMappingLocation = indexSettings.get("index.mapper.default_percolator_mapping_location");
URL percolatorMappingUrl = null;
if (percolatorMappingLocation != null) {
try {
percolatorMappingUrl = environment.resolveConfig(percolatorMappingLocation);
} catch (FailedToResolveConfigException e) {
// not there, default to the built in one
try {
percolatorMappingUrl = PathUtils.get(percolatorMappingLocation).toUri().toURL();
} catch (MalformedURLException e1) {
throw new FailedToResolveConfigException("Failed to resolve default percolator mapping location [" + percolatorMappingLocation + "]");
}
}
}
if (percolatorMappingUrl != null) {
try {
defaultPercolatorMappingSource = Streams.copyToString(FileSystemUtils.newBufferedReader(percolatorMappingUrl, Charsets.UTF_8));
} catch (IOException e) {
throw new MapperException("Failed to load default percolator mapping source from [" + percolatorMappingUrl + "]", e);
}
} else {
defaultPercolatorMappingSource = "{\n" +
//" \"" + PercolatorService.TYPE_NAME + "\":{\n" +
" \"" + "_default_" + "\":{\n" +
" \"properties\" : {\n" +
" \"query\" : {\n" +
" \"type\" : \"object\",\n" +
" \"enabled\" : false\n" +
" }\n" +
" }\n" +
" }\n" +
"}";
} }
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}] and source[{}], default percolator mapping: location[{}], loaded_from[{}] and source[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, defaultMappingSource, percolatorMappingLocation, percolatorMappingUrl, defaultPercolatorMappingSource); logger.trace("using dynamic[{}], default mapping source[{}], default percolator mapping source[{}]", dynamic, defaultMappingSource, defaultPercolatorMappingSource);
} else if (logger.isDebugEnabled()) { } else if (logger.isDebugEnabled()) {
logger.debug("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}], default percolator mapping: location[{}], loaded_from[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, percolatorMappingLocation, percolatorMappingUrl); logger.debug("using dynamic[{}]", dynamic);
} }
} }
private URL getMappingUrl(Settings indexSettings, Environment environment, String mappingLocation, String configString, String resourceLocation) {
URL mappingUrl;
if (mappingLocation == null) {
try {
mappingUrl = environment.resolveConfig(configString);
} catch (FailedToResolveConfigException e) {
// not there, default to the built in one
mappingUrl = indexSettings.getClassLoader().getResource(resourceLocation);
if (mappingUrl == null) {
mappingUrl = MapperService.class.getClassLoader().getResource(resourceLocation);
}
}
} else {
try {
mappingUrl = environment.resolveConfig(mappingLocation);
} catch (FailedToResolveConfigException e) {
// not there, default to the built in one
try {
mappingUrl = PathUtils.get(mappingLocation).toUri().toURL();
} catch (MalformedURLException e1) {
throw new FailedToResolveConfigException("Failed to resolve dynamic mapping location [" + mappingLocation + "]");
}
}
}
return mappingUrl;
}
     public void close() {
         for (DocumentMapper documentMapper : mappers.values()) {
             documentMapper.close();
@@ -451,11 +372,11 @@ public class MapperService extends AbstractIndexComponent {
             BooleanQuery bq = new BooleanQuery();
             bq.add(percolatorType, Occur.MUST_NOT);
             bq.add(Queries.newNonNestedFilter(), Occur.MUST);
-            return Queries.wrap(bq);
+            return new QueryWrapperFilter(bq);
         } else if (hasNested) {
             return Queries.newNonNestedFilter();
         } else if (filterPercolateType) {
-            return Queries.wrap(Queries.not(percolatorType));
+            return new QueryWrapperFilter(Queries.not(percolatorType));
         } else {
             return null;
         }
@@ -464,12 +385,12 @@ public class MapperService extends AbstractIndexComponent {
         // since they have different types (starting with __)
         if (types.length == 1) {
             DocumentMapper docMapper = documentMapper(types[0]);
-            Filter filter = docMapper != null ? docMapper.typeFilter() : Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, types[0])));
+            Filter filter = docMapper != null ? docMapper.typeFilter() : new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, types[0])));
             if (filterPercolateType) {
                 BooleanQuery bq = new BooleanQuery();
                 bq.add(percolatorType, Occur.MUST_NOT);
                 bq.add(filter, Occur.MUST);
-                return Queries.wrap(bq);
+                return new QueryWrapperFilter(bq);
             } else {
                 return filter;
             }
@@ -499,9 +420,9 @@ public class MapperService extends AbstractIndexComponent {
             BooleanQuery bq = new BooleanQuery();
             bq.add(percolatorType, Occur.MUST_NOT);
             bq.add(termsFilter, Occur.MUST);
-            return Queries.wrap(bq);
+            return new QueryWrapperFilter(bq);
         } else {
-            return Queries.wrap(termsFilter);
+            return new QueryWrapperFilter(termsFilter);
         }
     } else {
         // Current bool filter requires that at least one should clause matches, even with a must clause.
@@ -521,7 +442,7 @@ public class MapperService extends AbstractIndexComponent {
             bool.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST);
         }
-        return Queries.wrap(bool);
+        return new QueryWrapperFilter(bool);
     }
 }
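The recurring change in this file, and in the mapper files below, is mechanical: the Elasticsearch helper Queries.wrap(query), which was tied to the custom filter-cache plumbing, is replaced by Lucene's stock org.apache.lucene.search.QueryWrapperFilter. A minimal standalone sketch of the percolator-exclusion pattern from the hunks above, assuming Lucene 5.x as used in this commit; the "_type" and ".percolator" literals are illustrative stand-ins for TypeFieldMapper.NAME and PercolatorService.TYPE_NAME, and excludePercolator is a hypothetical helper, not MapperService's API:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;

class SearchFilterSketch {
    // Stand-in for MapperService's percolatorType query, which matches
    // documents of the internal percolator type.
    static final TermQuery percolatorType = new TermQuery(new Term("_type", ".percolator"));

    // Match everything the inner filter matches, except percolator documents:
    // MUST_NOT on the percolator type, MUST on the decorated filter, then wrap
    // the BooleanQuery as a Filter. Filter extends Query in Lucene 5, so it can
    // be added as a clause directly.
    static Filter excludePercolator(Filter inner) {
        BooleanQuery bq = new BooleanQuery(); // still mutable in Lucene 5.x
        bq.add(percolatorType, Occur.MUST_NOT);
        bq.add(inner, Occur.MUST);
        return new QueryWrapperFilter(bq);
    }
}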

View File

@@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.google.common.base.Objects;
 import com.google.common.collect.ImmutableList;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
@@ -35,6 +36,7 @@ import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
@@ -480,7 +482,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
     @Override
     public Filter termFilter(Object value, @Nullable QueryParseContext context) {
-        return Queries.wrap(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value))));
+        return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value))));
     }

     @Override
@@ -499,7 +501,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return Queries.wrap(new TermsQuery(names.indexName(), bytesRefs));
+        return new QueryWrapperFilter(new TermsQuery(names.indexName(), bytesRefs));
     }
 }
@@ -529,7 +531,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(new TermRangeQuery(names.indexName(),
+        return new QueryWrapperFilter(new TermRangeQuery(names.indexName(),
                 lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
                 upperTerm == null ? null : indexedValueForSearch(upperTerm),
                 includeLower, includeUpper));
@@ -551,7 +553,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
     @Override
     public Filter prefixFilter(Object value, @Nullable QueryParseContext context) {
-        return Queries.wrap(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value))));
+        return new QueryWrapperFilter(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value))));
     }

     @Override
@@ -565,7 +567,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
     @Override
     public Filter regexpFilter(Object value, int flags, int maxDeterminizedStates, @Nullable QueryParseContext parseContext) {
-        return Queries.wrap(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates));
+        return new QueryWrapperFilter(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates));
     }

     @Override
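Each filter factory in AbstractFieldMapper now simply wraps the corresponding plain Lucene query. A hedged sketch of the prefix and regexp variants, with the field name and values as placeholder literals in place of the mapper's names().createIndexNameTerm(...) and indexedValueForSearch(...) plumbing:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.RegexpQuery;

class WrappedFilterSketch {
    // Equivalent shape to prefixFilter above: a PrefixQuery wrapped as a Filter.
    static Filter prefixFilter(String field, String prefix) {
        return new QueryWrapperFilter(new PrefixQuery(new Term(field, prefix)));
    }

    // Equivalent shape to regexpFilter above; flags and maxDeterminizedStates
    // are passed straight through to Lucene's RegexpQuery.
    static Filter regexpFilter(String field, String regexp, int flags, int maxDeterminizedStates) {
        return new QueryWrapperFilter(new RegexpQuery(new Term(field, regexp), flags, maxDeterminizedStates));
    }
}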

View File

@@ -24,13 +24,13 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -205,7 +205,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper<Boolean> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE)));
+        return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE)));
     }

     @Override

View File

@@ -27,6 +27,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -34,7 +35,6 @@ import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -212,7 +212,7 @@ public class ByteFieldMapper extends NumberFieldMapper<Byte> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper,
             @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValueAsInt(lowerTerm),
                 upperTerm == null ? null : parseValueAsInt(upperTerm),
                 includeLower, includeUpper));
@@ -231,7 +231,7 @@ public class ByteFieldMapper extends NumberFieldMapper<Byte> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 nullValue.intValue(),
                 nullValue.intValue(),
                 true, true));
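All of the numeric mappers in this commit follow the same shape: build a NumericRangeQuery at the field's precision step and wrap it in a QueryWrapperFilter; byte and short fields reuse the int trie encoding, hence newIntRange with parseValueAsInt. A minimal sketch under that assumption; the "age" field name and precision step 8 are illustrative values, not taken from the diff:

import org.apache.lucene.search.Filter;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.QueryWrapperFilter;

class ByteRangeFilterSketch {
    static Filter byteRange(Byte lower, Byte upper, boolean includeLower, boolean includeUpper) {
        // Byte values are indexed with the int encoding, so the range is built
        // with newIntRange; null bounds mean an open-ended range.
        return new QueryWrapperFilter(NumericRangeQuery.newIntRange("age", 8,
                lower == null ? null : lower.intValue(),
                upper == null ? null : upper.intValue(),
                includeLower, includeUpper));
    }
}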

View File

@@ -27,6 +27,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -39,8 +40,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.joda.DateMathParser;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
-import org.elasticsearch.common.lucene.search.NoCacheQuery;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.lucene.search.ResolvableFilter;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
@@ -392,7 +391,7 @@ public class DateFieldMapper extends NumberFieldMapper<Long> {
         if (fieldData != null) {
             filter = NumericRangeFieldDataFilter.newLongRange(fieldData, lowerVal, upperVal, includeLower, includeUpper);
         } else {
-            filter = Queries.wrap(NumericRangeQuery.newLongRange(
+            filter = new QueryWrapperFilter(NumericRangeQuery.newLongRange(
                     names.indexName(), precisionStep, lowerVal, upperVal, includeLower, includeUpper
             ));
         }
@@ -406,7 +405,7 @@ public class DateFieldMapper extends NumberFieldMapper<Long> {
             return null;
         }
         long value = parseStringValue(nullValue);
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 value,
                 value,
                 true, true));
@@ -588,7 +587,7 @@ public class DateFieldMapper extends NumberFieldMapper<Long> {
         }
     }

-    public final class LateParsingQuery extends NoCacheQuery {
+    public final class LateParsingQuery extends Query {

         final Object lowerTerm;
         final Object upperTerm;
@@ -613,7 +612,7 @@ public class DateFieldMapper extends NumberFieldMapper<Long> {
         }

         @Override
-        public String innerToString(String s) {
+        public String toString(String s) {
             final StringBuilder sb = new StringBuilder();
             return sb.append(names.indexName()).append(':')
                     .append(includeLower ? '[' : '{')
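With the NoCacheQuery marker class gone, LateParsingQuery extends plain Lucene Query, and its rendering override moves from the custom innerToString to Query's standard toString(String field) contract. A minimal sketch of a query in that style, assuming Lucene 5.x; LateParsingSketch and its fields are illustrative and only mirror the shape of DateFieldMapper.LateParsingQuery:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;

// Carries its raw bounds until rewrite() is called, when a real implementation
// would parse them (e.g. date math) and return a concrete range query.
final class LateParsingSketch extends Query {
    private final String field;
    private final Object lowerTerm;
    private final Object upperTerm;
    private final boolean includeLower;
    private final boolean includeUpper;

    LateParsingSketch(String field, Object lowerTerm, Object upperTerm,
                      boolean includeLower, boolean includeUpper) {
        this.field = field;
        this.lowerTerm = lowerTerm;
        this.upperTerm = upperTerm;
        this.includeLower = includeLower;
        this.includeUpper = includeUpper;
    }

    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        // Placeholder: resolve the bounds and return the concrete query here.
        return super.rewrite(reader);
    }

    @Override
    public String toString(String s) {
        // Same rendering style as the diff above: [lower TO upper] / {lower TO upper}.
        return new StringBuilder().append(field).append(':')
                .append(includeLower ? '[' : '{')
                .append(lowerTerm).append(" TO ").append(upperTerm)
                .append(includeUpper ? ']' : '}').toString();
    }
}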

View File

@@ -31,6 +31,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -38,7 +39,6 @@ import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.util.ByteUtils;
@@ -202,14 +202,14 @@ public class DoubleFieldMapper extends NumberFieldMapper<Double> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseDoubleValue(lowerTerm),
                 upperTerm == null ? null : parseDoubleValue(upperTerm),
                 includeLower, includeUpper));
     }

     public Filter rangeFilter(Double lowerTerm, Double upperTerm, boolean includeLower, boolean includeUpper) {
-        return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper));
+        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper));
     }

     @Override
@@ -225,7 +225,7 @@ public class DoubleFieldMapper extends NumberFieldMapper<Double> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));

View File

@@ -31,6 +31,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -39,7 +40,6 @@ import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.util.ByteUtils;
@@ -212,7 +212,7 @@ public class FloatFieldMapper extends NumberFieldMapper<Float> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -231,7 +231,7 @@ public class FloatFieldMapper extends NumberFieldMapper<Float> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -206,7 +206,7 @@ public class IntegerFieldMapper extends NumberFieldMapper<Integer> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -225,7 +225,7 @@ public class IntegerFieldMapper extends NumberFieldMapper<Integer> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -196,7 +196,7 @@ public class LongFieldMapper extends NumberFieldMapper<Long> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseLongValue(lowerTerm),
                 upperTerm == null ? null : parseLongValue(upperTerm),
                 includeLower, includeUpper));
@@ -215,7 +215,7 @@ public class LongFieldMapper extends NumberFieldMapper<Long> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -212,7 +212,7 @@ public class ShortFieldMapper extends NumberFieldMapper<Short> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValueAsInt(lowerTerm),
                 upperTerm == null ? null : parseValueAsInt(upperTerm),
                 includeLower, includeUpper));
@@ -220,7 +220,7 @@ public class ShortFieldMapper extends NumberFieldMapper<Short> {
     @Override
     public Filter rangeFilter(QueryParseContext parseContext, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this),
+        return new QueryWrapperFilter(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this),
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -231,7 +231,7 @@ public class ShortFieldMapper extends NumberFieldMapper<Short> {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 nullValue.intValue(),
                 nullValue.intValue(),
                 true, true));

View File

@@ -34,6 +34,7 @@ import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
@@ -41,7 +42,6 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.lucene.BytesRefs;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -200,7 +200,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
         if (fieldType.indexOptions() != IndexOptions.NONE || context == null) {
             return super.termFilter(value, context);
         }
-        return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value)));
+        return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value)));
     }

     @Override
@@ -208,7 +208,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
         if (fieldType.indexOptions() != IndexOptions.NONE || context == null) {
             return super.termsFilter(values, context);
         }
-        return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values)));
+        return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values)));
     }

     @Override
@@ -238,7 +238,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
         for (String queryType : queryTypes) {
             filter.add(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value)))), BooleanClause.Occur.SHOULD);
         }
-        return Queries.wrap(filter);
+        return new QueryWrapperFilter(filter);
     }

     @Override
@@ -277,7 +277,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
             filter.add(new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))),
                     flags, maxDeterminizedStates), BooleanClause.Occur.SHOULD);
         }
-        return Queries.wrap(filter);
+        return new QueryWrapperFilter(filter);
     }

     @Override
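When _id is not indexed, the lookups above are rewritten against the _uid field, whose terms are of the form type#id; the diff builds a TermsQuery over one such term per queried type and wraps it. A hedged sketch that spells the encoding out by hand, since Uid.createTypeUids is ES-internal; the "#" delimiter matches Elasticsearch's uid format, and idFilter is a hypothetical helper:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.util.BytesRef;

class UidTermFilterSketch {
    // Roughly what Uid.createTypeUids produces: one "type#id" term per type.
    static Filter idFilter(Collection<String> queryTypes, String id) {
        List<BytesRef> uids = new ArrayList<>();
        for (String type : queryTypes) {
            uids.add(new BytesRef(type + "#" + id));
        }
        return new QueryWrapperFilter(new TermsQuery("_uid", uids));
    }
}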

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
@@ -275,7 +276,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
         }
         BytesRef bValue = BytesRefs.toBytesRef(value);
         if (Uid.hasDelimiter(bValue)) {
-            return Queries.wrap(new TermQuery(new Term(names.indexName(), bValue)));
+            return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), bValue)));
         }

         List<String> types = new ArrayList<>(context.mapperService().types().size());
@@ -288,14 +289,14 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
         if (types.isEmpty()) {
             return Queries.newMatchNoDocsFilter();
         } else if (types.size() == 1) {
-            return Queries.wrap(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue))));
+            return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue))));
         } else {
             // we use all non child types, cause we don't know if its exact or not...
             List<BytesRef> typesValues = new ArrayList<>(types.size());
             for (String type : context.mapperService().types()) {
                 typesValues.add(Uid.createUidAsBytes(type, bValue));
             }
-            return Queries.wrap(new TermsQuery(names.indexName(), typesValues));
+            return new QueryWrapperFilter(new TermsQuery(names.indexName(), typesValues));
         }
     }
@@ -328,7 +329,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
                 }
             }
         }
-        return Queries.wrap(new TermsQuery(names.indexName(), bValues));
+        return new QueryWrapperFilter(new TermsQuery(names.indexName(), bValues));
     }

     /**

View File

@@ -28,13 +28,13 @@ import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lucene.BytesRefs;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.fielddata.FieldDataType;
@@ -133,15 +133,15 @@ public class TypeFieldMapper extends AbstractFieldMapper<String> implements Inte
     @Override
     public Query termQuery(Object value, @Nullable QueryParseContext context) {
-        return new ConstantScoreQuery(context.cacheFilter(termFilter(value, context), null, context.autoFilterCachePolicy()));
+        return new ConstantScoreQuery(termFilter(value, context));
     }

     @Override
     public Filter termFilter(Object value, @Nullable QueryParseContext context) {
         if (fieldType.indexOptions() == IndexOptions.NONE) {
-            return Queries.wrap(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value)))));
+            return new QueryWrapperFilter(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value)))));
         }
-        return Queries.wrap(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value))));
+        return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value))));
     }

     @Override
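termQuery previously routed the type filter through the parse context's filter cache; after this change it simply wraps the uncached filter in a ConstantScoreQuery, leaving caching decisions to the engine. A minimal sketch of the resulting shape, assuming Lucene 5.x; the "_type" literal stands in for TypeFieldMapper.NAME and both methods are hypothetical helpers:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;

class TypeQuerySketch {
    // Uncached _type filter: a plain term filter on the type field.
    static Filter typeFilter(String type) {
        return new QueryWrapperFilter(new TermQuery(new Term("_type", type)));
    }

    // termQuery now just gives the filter constant scores; no explicit cache step.
    static Query typeQuery(String type) {
        return new ConstantScoreQuery(typeFilter(type));
    }
}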

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
@@ -35,7 +36,6 @@ import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -253,7 +253,7 @@ public class IpFieldMapper extends NumberFieldMapper<Long> {
     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -273,7 +273,7 @@ public class IpFieldMapper extends NumberFieldMapper<Long> {
             return null;
         }
         final long value = ipToLong(nullValue);
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 value,
                 value,
                 true, true));

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.object;
 import com.google.common.collect.Iterables;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchParseException;
@@ -29,7 +30,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.CopyOnWriteHashMap;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -367,7 +367,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea
         }
         this.nestedTypePathAsString = "__" + fullPath;
         this.nestedTypePathAsBytes = new BytesRef(nestedTypePathAsString);
-        this.nestedTypeFilter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes)));
+        this.nestedTypeFilter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes)));
     }

     @Override

View File

@@ -20,7 +20,6 @@
 package org.elasticsearch.index.percolator;

 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
@@ -28,7 +27,6 @@ import org.apache.lucene.util.CloseableThreadLocal;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -93,7 +91,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
     private CloseableThreadLocal<QueryParseContext> cache = new CloseableThreadLocal<QueryParseContext>() {
         @Override
         protected QueryParseContext initialValue() {
-            return new QueryParseContext(shardId.index(), queryParserService, true);
+            return new QueryParseContext(shardId.index(), queryParserService);
         }
     };
@@ -280,13 +278,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
         shard.refresh("percolator_load_queries");
         // Maybe add a mode load? This isn't really a write. We need write b/c state=post_recovery
         try (Engine.Searcher searcher = shard.acquireSearcher("percolator_load_queries", true)) {
-            Query query = new ConstantScoreQuery(
-                    indexCache.filter().cache(
-                            Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))),
-                            null,
-                            queryParserService.autoFilterCachePolicy()
-                    )
-            );
+            Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
             QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
             searcher.searcher().search(query, queryCollector);
             Map<BytesRef, Query> queries = queryCollector.queries();
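Loading registered percolator queries no longer goes through the index filter cache: a plain TermQuery on the internal percolator type is searched with the loader collector. A hedged sketch of that search shape using Lucene 5.x directly; QueriesLoaderCollector is ES-internal, so a simple hit counter stands in, and ".percolator" is an illustrative stand-in for PercolatorService.TYPE_NAME:

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TotalHitCountCollector;

class PercolatorLoadSketch {
    static int countPercolatorDocs(IndexSearcher searcher) throws IOException {
        // Uncached type query; the diff feeds a QueriesLoaderCollector here,
        // but any Collector is driven the same way by IndexSearcher.search.
        Query query = new TermQuery(new Term("_type", ".percolator"));
        TotalHitCountCollector collector = new TotalHitCountCollector();
        searcher.search(query, collector);
        return collector.getTotalHits();
    }
}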

View File

@@ -34,9 +34,6 @@ public class AndFilterBuilder extends BaseFilterBuilder {
     private ArrayList<FilterBuilder> filters = Lists.newArrayList();

-    private Boolean cache;
-    private String cacheKey;
     private String filterName;

     public AndFilterBuilder(FilterBuilder... filters) {
@@ -53,19 +50,6 @@ public class AndFilterBuilder extends BaseFilterBuilder {
         return this;
     }

-    /**
-     * Should the filter be cached or not. Defaults to <tt>false</tt>.
-     */
-    public AndFilterBuilder cache(boolean cache) {
-        this.cache = cache;
-        return this;
-    }
-
-    public AndFilterBuilder cacheKey(String cacheKey) {
-        this.cacheKey = cacheKey;
-        return this;
-    }
-
     /**
      * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
      */
@@ -82,12 +66,6 @@ public class AndFilterBuilder extends BaseFilterBuilder {
             filter.toXContent(builder, params);
         }
         builder.endArray();
-        if (cache != null) {
-            builder.field("_cache", cache);
-        }
-        if (cacheKey != null) {
-            builder.field("_cache_key", cacheKey);
-        }
         if (filterName != null) {
             builder.field("_name", filterName);
         }
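With filter caching removed, the and filter's _cache and _cache_key options disappear from both the Java builder and the JSON it generates; only the filters array and the optional _name remain. A hypothetical usage sketch after this change; the field names and values are illustrative, and the old .cache(true)/.cacheKey(...) calls would no longer compile:

import org.elasticsearch.index.query.AndFilterBuilder;
import org.elasticsearch.index.query.FilterBuilders;

class AndFilterUsageSketch {
    static AndFilterBuilder build() {
        // Only the filter list and an optional name survive this commit.
        return new AndFilterBuilder(
                FilterBuilders.termFilter("user", "kimchy"),
                FilterBuilders.rangeFilter("age").from(20).to(40))
            .filterName("my_and");
    }
}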

Some files were not shown because too many files have changed in this diff.