merged from master

This commit is contained in:
Boaz Leskes 2015-04-22 13:46:31 +02:00
commit 7bcf7ba218
547 changed files with 12078 additions and 11402 deletions

View File

@ -36,7 +36,7 @@ First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasti
h3. Installation
* "Download":https://www.elastic.co/products/elasticsearch/download and unzip the Elasticsearch official distribution.
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
* Run @bin/elasticsearch@ on Unix, or @bin\elasticsearch.bat@ on Windows.
* Run @curl -X GET http://localhost:9200/@.
* Start more servers ...
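Once the node responds, a quick smoke test is to index and fetch a document (a minimal sketch; the index, type and field names are illustrative):

<pre>
curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
</pre>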

View File

@ -260,7 +260,7 @@ The REST tests are run automatically when executing the maven test command. To r
REST tests use the following command:
---------------------------------------------------------------------------
mvn test -Dtests.class=org.elasticsearch.test.rest.ElasticsearchRestTests
mvn test -Dtests.filter="@Rest"
---------------------------------------------------------------------------
`ElasticsearchRestTests` is the executable test class that runs all the
@ -298,3 +298,23 @@ You can also skip this by using the "dev" profile:
---------------------------------------------------------------------------
mvn test -Pdev
---------------------------------------------------------------------------
== Testing scripts
Shell scripts can be tested with the Bash Automated Testing System (BATS) tool available
at https://github.com/sstephenson/bats. Once the tool is installed, you can
execute a .bats test file with the following command:
---------------------------------------------------------------------------
bats test_file.bats
---------------------------------------------------------------------------
When executing the test files located in the `/packaging/scripts` folder,
it's possible to set the environment variable `ES_CLEAN_BEFORE_TEST=true` to clean the test
environment before the tests are executed:
---------------------------------------------------------------------------
ES_CLEAN_BEFORE_TEST=true bats 30_deb_package.bats
---------------------------------------------------------------------------
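For illustration, a minimal `.bats` file follows the pattern below (a sketch; the test name and the path it checks are assumptions, not one of the bundled tests):
---------------------------------------------------------------------------
#!/usr/bin/env bats

@test "elasticsearch home directory exists" {
    [ -d "/usr/share/elasticsearch" ]
}
---------------------------------------------------------------------------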

View File

@ -33,3 +33,11 @@ java.nio.file.Path#toFile()
@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter
java.nio.file.Paths @ Use PathUtils.get instead.
java.nio.file.FileSystems#getDefault() @ use PathUtils.getDefault instead.

View File

@ -16,3 +16,5 @@
com.carrotsearch.randomizedtesting.RandomizedTest#globalTempDir() @ Use newTempDirPath() instead
com.carrotsearch.randomizedtesting.annotations.Seed @ Don't commit hardcoded seeds
org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead

View File

@ -26,13 +26,16 @@ grant {
// contain read access to only what we need:
// project base directory
permission java.io.FilePermission "${project.basedir}${/}target${/}-", "read";
// read permission for lib sigar
permission java.io.FilePermission "${project.basedir}${/}lib/sigar{/}-", "read";
// mvn custom ./m2/repository for dependency jars
permission java.io.FilePermission "${m2.repository}${/}-", "read";
// system jar resources
permission java.io.FilePermission "${java.home}${/}-", "read";
// per-jvm directory
permission java.io.FilePermission "${junit4.childvm.cwd}${/}temp", "read,write";
permission java.io.FilePermission "${junit4.childvm.cwd}${/}temp${/}-", "read,write,delete";
permission java.io.FilePermission "${junit4.tempDir}${/}*", "read,write,delete";
permission java.nio.file.LinkPermission "symbolic";
permission groovy.security.GroovyCodeSourcePermission "/groovy/script";

View File

@ -9,26 +9,47 @@ analyzers:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze?analyzer=standard' -d 'this is a test'
curl -XGET 'localhost:9200/_analyze' -d '
{
"analyzer" : "standard",
"text" : "this is a test"
}'
--------------------------------------------------
coming[2.0.0, body based parameters were added in 2.0.0]
Or by building a custom transient analyzer out of tokenizers,
token filters and char filters. Token filters can use the shorter 'filters'
parameter name:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filters=lowercase' -d 'this is a test'
curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&token_filters=lowercase&char_filters=html_strip' -d 'this is a <b>test</b>'
curl -XGET 'localhost:9200/_analyze' -d '
{
"tokenizer" : "keyword",
"filters" : ["lowercase"],
"text" : "this is a test"
}'
curl -XGET 'localhost:9200/_analyze' -d '
{
"tokenizer" : "keyword",
"token_filters" : ["lowercase"],
"char_filters" : ["html_strip"],
"text" : "this is a <b>test</b>"
}'
--------------------------------------------------
coming[2.0.0, body based parameters were added in 2.0.0]
It can also run against a specific index:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/test/_analyze?text=this+is+a+test'
curl -XGET 'localhost:9200/test/_analyze' -d '
{
"text" : "this is a test"
}'
--------------------------------------------------
The above will run an analysis on the "this is a test" text, using the
@ -37,18 +58,42 @@ can also be provided to use a different analyzer:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/test/_analyze?analyzer=whitespace' -d 'this is a test'
curl -XGET 'localhost:9200/test/_analyze' -d '
{
"analyzer" : "whitespace",
"text : "this is a test"
}'
--------------------------------------------------
coming[2.0.0, body based parameters were added in 2.0.0]
Also, the analyzer can be derived based on a field mapping, for example:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/test/_analyze?field=obj1.field1' -d 'this is a test'
curl -XGET 'localhost:9200/test/_analyze' -d '
{
"field" : "obj1.field1",
"text" : "this is a test"
}'
--------------------------------------------------
coming[2.0.0, body based parameters were added in 2.0.0]
Will cause the analysis to happen based on the analyzer configured in the
mapping for `obj1.field1` (and if not, the default index analyzer).
Also, the text can be provided as part of the request body, and not as a
parameter.
All parameters can also be supplied as request parameters. For example:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&filters=lowercase&text=this+is+a+test'
--------------------------------------------------
For backwards compatibility, we also accept the text parameter as the body of the request,
provided it doesn't start with `{`:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze?tokenizer=keyword&token_filters=lowercase&char_filters=html_strip' -d 'this is a <b>test</b>'
--------------------------------------------------

View File

@ -46,7 +46,13 @@ via the mapping API even if you use the precision parameter.
|`distance_error_pct` |Used as a hint to the PrefixTree about how
precise it should be. Defaults to 0.025 (2.5%) with 0.5 as the maximum
supported value.
supported value. PERFORMANCE NOTE: This value will default to 0 if a `precision` or
`tree_level` definition is explicitly defined. This guarantees spatial precision
at the level defined in the mapping. This can lead to significant memory usage
for high resolution shapes with low error (e.g., large shapes at 1m with < 0.001 error).
To improve indexing performance (at the cost of query accuracy), explicitly define
`tree_level` or `precision` along with a reasonable `distance_error_pct`, noting
that large shapes will have greater false positives.
|`orientation` |Optionally define how to interpret vertex order for
polygons / multipolygons. This parameter defines one of two coordinate

View File

@ -374,9 +374,18 @@ http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
The cluster state api doesn't return the `routing_nodes` section anymore when
`routing_table` is requested. The newly introduced `routing_nodes` flag can
be used separately to control whether `routing_nodes` should be returned.
=== Query DSL
The `fuzzy_like_this` and `fuzzy_like_this_field` queries have been removed.
The `limit` filter is deprecated and becomes a no-op. You can achieve similar
behaviour using the <<search-request-body,terminate_after>> parameter.
`or` and `and` on the one hand and `bool` on the other hand used to have
different performance characteristics depending on the wrapped filters. This is
fixed now; as a consequence, the `or` and `and` filters are now deprecated in
favour of `bool`.
The `execution` option of the `terms` filter is now deprecated and ignored if
provided.
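As a sketch of the migration, a filter that previously combined clauses with `and` can be expressed with a `bool` filter using `must` clauses (the field names below are illustrative):
[source,js]
--------------------------------------------------
{
    "constant_score" : {
        "filter" : {
            "bool" : {
                "must" : [
                    { "term" : { "user" : "kimchy" } },
                    { "range" : { "age" : { "gte" : 30 } } }
                ]
            }
        }
    }
}
--------------------------------------------------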

View File

@ -1,6 +1,8 @@
[[query-dsl-and-filter]]
=== And Filter
deprecated[2.0.0, Use the `bool` filter instead]
A filter that matches documents using the `AND` boolean operator on other
filters. Can be placed within queries that accept a filter.

View File

@ -1,6 +1,8 @@
[[query-dsl-or-filter]]
=== Or Filter
deprecated[2.0.0, Use the `bool` filter instead]
A filter that matches documents using the `OR` boolean operator on other
filters. Can be placed within queries that accept a filter.

View File

@ -18,71 +18,6 @@ Filters documents that have fields that match any of the provided terms
The `terms` filter is also aliased with `in` as the filter name for
simpler usage.
[float]
==== Execution Mode
The way the terms filter executes is by iterating over the terms provided,
finding matching docs (loading them into a bitset) and caching it.
Sometimes, we want a different execution model that can still be
achieved by building more complex queries in the DSL, but we can support
them in the more compact model that terms filter provides.
The `execution` option now has the following options:
[horizontal]
`plain`::
The default. Works as today. Iterates over all the terms,
building a bit set matching it, and filtering. The total filter is
cached.
`fielddata`::
Generates a terms filters that uses the fielddata cache to
compare terms. This execution mode is great to use when filtering
on a field that is already loaded into the fielddata cache from
aggregating, sorting, or index warmers. When filtering on
a large number of terms, this execution can be considerably faster
than the other modes. The total filter is not cached unless
explicitly configured to do so.
`bool`::
Generates a term filter (which is cached) for each term, and
wraps those in a bool filter. The bool filter itself is not cached as it
can operate very quickly on the cached term filters.
`and`::
Generates a term filter (which is cached) for each term, and
wraps those in an and filter. The and filter itself is not cached.
`or`::
Generates a term filter (which is cached) for each term, and
wraps those in an or filter. The or filter itself is not cached.
Generally, the `bool` execution mode should be preferred.
If you don't want the generated individual term queries to be cached,
you can use: `bool_nocache`, `and_nocache` or `or_nocache` instead, but
be aware that this will affect performance.
The "total" terms filter caching can still be explicitly controlled
using the `_cache` option. Note the default value for it depends on the
execution value.
For example:
[source,js]
--------------------------------------------------
{
"constant_score" : {
"filter" : {
"terms" : {
"user" : ["kimchy", "elasticsearch"],
"execution" : "bool",
"_cache": true
}
}
}
}
--------------------------------------------------
[float]
==== Caching

View File

@ -0,0 +1,154 @@
[[search-aggregations-bucket-sampler-aggregation]]
=== Sampler Aggregation
experimental[]
A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents.
Optionally, diversity settings can be used to limit the number of matches that share a common value such as an "author".
.Example use cases:
* Tightening the focus of analytics to high-relevance matches rather than the potentially very long tail of low-quality matches
* Removing bias from analytics by ensuring fair representation of content from different sources
* Reducing the running cost of aggregations that can produce useful results using only samples e.g. `significant_terms`
Example:
[source,js]
--------------------------------------------------
{
"query": {
"match": {
"text": "iphone"
}
},
"aggs": {
"sample": {
"sampler": {
"shard_size": 200,
"field" : "user.id"
},
"aggs": {
"keywords": {
"significant_terms": {
"field": "text"
}
}
}
}
}
}
--------------------------------------------------
Response:
[source,js]
--------------------------------------------------
{
...
"aggregations": {
"sample": {
"doc_count": 1000,<1>
"keywords": {<2>
"doc_count": 1000,
"buckets": [
...
{
"key": "bend",
"doc_count": 58,
"score": 37.982536582524276,
"bg_count": 103
},
....
}
--------------------------------------------------
<1> 1000 documents were sampled in total because we asked for a maximum of 200 from an index with 5 shards. The cost of performing the nested significant_terms aggregation was therefore limited rather than unbounded.
<2> The results of the significant_terms aggregation are not skewed by any single over-active Twitter user because we asked for a maximum of one tweet from any one user in our sample.
==== shard_size
The `shard_size` parameter limits how many top-scoring documents are collected in the sample processed on each shard.
The default value is 100.
=== Controlling diversity
Optionally, you can use the `field` or `script` and `max_docs_per_value` settings to control the maximum number of documents collected on any one shard which share a common value.
The choice of value (e.g. `author`) is loaded from a regular `field` or derived dynamically by a `script`.
The aggregation will throw an error if the choice of field or script produces multiple values for a document.
It is currently not possible to offer this form of de-duplication using many values, primarily due to concerns over efficiency.
NOTE: Any good market researcher will tell you that when working with samples of data it is important
that the sample represents a healthy variety of opinions rather than being skewed by any single voice.
The same is true with aggregations; sampling with these diversity settings can offer a way to remove bias from your content (an over-populated geography, a large spike in a timeline or an over-active forum spammer).
==== Field
Controlling diversity using a field:
[source,js]
--------------------------------------------------
{
"aggs" : {
"sample" : {
"sampler" : {
"field" : "author",
"max_docs_per_value" : 3
}
}
}
}
--------------------------------------------------
Note that the `max_docs_per_value` setting applies on a per-shard basis only for the purposes of shard-local sampling.
It is not intended as a way of providing a global de-duplication feature on search results.
==== Script
Controlling diversity using a script:
[source,js]
--------------------------------------------------
{
"aggs" : {
"sample" : {
"sampler" : {
"script" : "doc['author'].value + '/' + doc['genre'].value"
}
}
}
}
--------------------------------------------------
Note in the above example we chose to use the default `max_docs_per_value` setting of 1 and combine author and genre fields to ensure
each shard sample has, at most, one match for an author/genre pair.
==== execution_hint
When using the settings to control diversity, the optional `execution_hint` setting can influence the management of the values used for de-duplication.
Each option will hold up to `shard_size` values in memory while performing de-duplication, but the type of value held can be controlled as follows:
- hold field values directly (`map`)
- hold ordinals of the field as determined by the Lucene index (`global_ordinals`)
- hold hashes of the field values - with potential for hash collisions (`bytes_hash`)
The default setting is to use `global_ordinals` if this information is available from the Lucene index, reverting to `map` if not.
The `bytes_hash` setting may prove faster in some cases but introduces the possibility of false positives in de-duplication logic due to the possibility of hash collisions.
Please note that Elasticsearch will ignore the choice of execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.
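For example, a diversified sample that forces the `map` execution hint could look like the following sketch (the field name is illustrative):
[source,js]
--------------------------------------------------
{
    "aggs" : {
        "sample" : {
            "sampler" : {
                "field" : "author",
                "max_docs_per_value" : 1,
                "execution_hint" : "map"
            }
        }
    }
}
--------------------------------------------------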
=== Limitations
==== Cannot be nested under `breadth_first` aggregations
Being a quality-based filter, the sampler aggregation needs access to the relevance score produced for each document.
It therefore cannot be nested under a `terms` aggregation which has the `collect_mode` switched from the default `depth_first` mode to `breadth_first`, as this discards scores.
In this situation an error will be thrown.
==== Limited de-dup logic
The de-duplication logic in the diversity settings applies only at the shard level, so it will not apply across shards.
==== No specialized syntax for geo/date fields
Currently the syntax for defining the diversifying values is defined by a choice of `field` or `script` - there is no added syntactical sugar for expressing geo or date units such as "1w" (1 week).
This support may be added in a later release and users will currently have to create these sorts of values using a script.

View File

@ -414,8 +414,9 @@ The parameter `shard_min_doc_count` regulates the _certainty_ a shard has if the
NOTE: Setting `min_doc_count`=`0` will also return buckets for terms that didn't match any hit. However, some of
the returned terms which have a document count of zero might only belong to deleted documents, so there is
no warranty that a `match_all` query would find a positive document count for those terms.
the returned terms which have a document count of zero might only belong to deleted documents or documents
from other types, so there is no warranty that a `match_all` query would find a positive document count for
those terms.
WARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_count` may return a number of buckets
which is less than `size` because not enough data was gathered from the shards. Missing buckets can be

View File

@ -55,20 +55,34 @@ results.
[source,js]
--------------------------------------------------
curl -XGET <1> 'localhost:9200/_search/scroll?scroll=1m' <2> <3> \
-d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1' <4>
curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d'
{
"scroll" : "1m", <3>
"scroll_id" : "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1" <4>
}
'
--------------------------------------------------
coming[2.0.0, body based parameters were added in 2.0.0]
<1> `GET` or `POST` can be used.
<2> The URL should not include the `index` or `type` name -- these
are specified in the original `search` request instead.
<3> The `scroll` parameter tells Elasticsearch to keep the search context open
for another `1m`.
<4> The `scroll_id` can be passed in the request body or in the
query string as `?scroll_id=....`
<4> The `scroll_id` parameter
Each call to the `scroll` API returns the next batch of results until there
are no more results left to return, i.e. the `hits` array is empty.
For backwards compatibility, `scroll_id` and `scroll` can be passed in the query string,
and the `scroll_id` can be passed as the plain-text body of the request:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_search/scroll?scroll=1m' -d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1'
--------------------------------------------------
IMPORTANT: The initial search request and each subsequent scroll request
returns a new `scroll_id` -- only the most recent `scroll_id` should be
used.
@ -92,9 +106,9 @@ cost.
Normally, you just want to retrieve all results and the order doesn't matter.
Scrolling can be combined with the <<scan,`scan`>> search type to disable
sorting and to return results in the most efficient way possible. All that is
needed is to add `search_type=scan` to the query string of the initial search
request:
any scoring or sorting and to return results in the most efficient way
possible. All that is needed is to add `search_type=scan` to the query string
of the initial search request:
[source,js]
--------------------------------------------------
@ -114,7 +128,8 @@ curl 'localhost:9200/twitter/tweet/_search?scroll=1m&search_type=scan' <1> -d '
A scanning scroll request differs from a standard scroll request in four
ways:
* Sorting is disabled. Results are returned in the order they appear in the index.
* No score is calculated and sorting is disabled. Results are returned in
the order they appear in the index.
* Aggregations are not supported.
@ -126,6 +141,9 @@ ways:
results *per shard*, not per request, so a `size` of `10` which hits 5
shards will return a maximum of 50 results per `scroll` request.
If you want the scoring to happen, even without sorting on it, set the
`track_scores` parameter to `true`.
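A sketch of such a request (the index, type and query are illustrative):
[source,js]
--------------------------------------------------
curl 'localhost:9200/twitter/tweet/_search?scroll=1m&search_type=scan' -d '
{
    "track_scores" : true,
    "query" : {
        "match" : { "title" : "elasticsearch" }
    }
}'
--------------------------------------------------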
[[scroll-search-context]]
==== Keeping the search context alive
@ -164,19 +182,26 @@ clear a search context manually with the `clear-scroll` API:
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll \
-d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1' <1>
curl -XDELETE localhost:9200/_search/scroll -d '
{
"scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"]
}'
---------------------------------------
<1> The `scroll_id` can be passed in the request body or in the query string.
Multiple scroll IDs can be passed as comma separated values:
coming[2.0.0, Body based parameters were added in 2.0.0]
Multiple scroll IDs can be passed as an array:
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll \
-d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1,aGVuRmV0Y2g7NTsxOnkxaDZ' <1>
curl -XDELETE localhost:9200/_search/scroll -d '
{
"scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1", "aGVuRmV0Y2g7NTsxOnkxaDZ"]
}'
---------------------------------------
coming[2.0.0, Body based parameters were added in 2.0.0]
All search contexts can be cleared with the `_all` parameter:
[source,js]
@ -184,3 +209,12 @@ All search contexts can be cleared with the `_all` parameter:
curl -XDELETE localhost:9200/_search/scroll/_all
---------------------------------------
The `scroll_id` can also be passed as a query string parameter or in the request body.
Multiple scroll IDs can be passed as comma separated values:
[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll \
-d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1,aGVuRmV0Y2g7NTsxOnkxaDZ'
---------------------------------------

pom.xml
View File

@ -32,7 +32,9 @@
<properties>
<lucene.version>5.2.0</lucene.version>
<lucene.maven.version>5.2.0-snapshot-1674183</lucene.maven.version>
<lucene.snapshot.revision>1675100</lucene.snapshot.revision>
<lucene.maven.version>5.2.0-snapshot-${lucene.snapshot.revision}</lucene.maven.version>
<testframework.version>2.1.14</testframework.version>
<tests.jvms>auto</tests.jvms>
<tests.shuffle>true</tests.shuffle>
<tests.output>onerror</tests.output>
@ -40,7 +42,8 @@
<tests.bwc.path>${project.basedir}/backwards</tests.bwc.path>
<tests.locale>random</tests.locale>
<tests.timezone>random</tests.timezone>
<es.logger.level>INFO</es.logger.level>
<tests.slow>false</tests.slow>
<es.logger.level>ERROR</es.logger.level>
<tests.heap.size>512m</tests.heap.size>
<tests.heapdump.path>${basedir}/logs/</tests.heapdump.path>
<tests.topn>5</tests.topn>
@ -48,12 +51,15 @@
<!-- Properties used for building RPM & DEB packages (see common/packaging.properties) -->
<packaging.elasticsearch.home.dir>/usr/share/elasticsearch</packaging.elasticsearch.home.dir>
<packaging.elasticsearch.bin.dir>/usr/share/elasticsearch/bin</packaging.elasticsearch.bin.dir>
<packaging.elasticsearch.conf.dir>/etc/elasticsearch</packaging.elasticsearch.conf.dir>
<packaging.elasticsearch.data.dir>/var/lib/elasticsearch</packaging.elasticsearch.data.dir>
<packaging.elasticsearch.user>elasticsearch</packaging.elasticsearch.user>
<packaging.elasticsearch.group>elasticsearch</packaging.elasticsearch.group>
<packaging.elasticsearch.work.dir>/var/run/elasticsearch</packaging.elasticsearch.work.dir>
<packaging.elasticsearch.work.dir>/tmp/elasticsearch</packaging.elasticsearch.work.dir>
<packaging.elasticsearch.log.dir>/var/log/elasticsearch</packaging.elasticsearch.log.dir>
<packaging.elasticsearch.plugins.dir>${packaging.elasticsearch.home.dir}/plugins</packaging.elasticsearch.plugins.dir>
<packaging.elasticsearch.pid.dir>/var/run/elasticsearch</packaging.elasticsearch.pid.dir>
</properties>
@ -66,7 +72,7 @@
<repository>
<id>lucene-snapshots</id>
<name>Lucene Snapshots</name>
<url>https://download.elastic.co/lucenesnapshots/1674183</url>
<url>https://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision}</url>
</repository>
</repositories>
@ -80,7 +86,7 @@
<dependency>
<groupId>com.carrotsearch.randomizedtesting</groupId>
<artifactId>randomizedtesting-runner</artifactId>
<version>2.1.13</version>
<version>${testframework.version}</version>
<scope>test</scope>
</dependency>
<dependency>
@ -501,7 +507,7 @@
<plugin>
<groupId>com.carrotsearch.randomizedtesting</groupId>
<artifactId>junit4-maven-plugin</artifactId>
<version>2.1.13</version>
<version>${testframework.version}</version>
<executions>
<execution>
<id>tests</id>
@ -510,9 +516,10 @@
<goal>junit4</goal>
</goals>
<configuration>
<heartbeat>20</heartbeat>
<heartbeat>10</heartbeat>
<jvmOutputAction>pipe,warn</jvmOutputAction>
<leaveTemporary>true</leaveTemporary>
<ifNoTests>fail</ifNoTests>
<listeners>
<report-ant-xml mavenExtensions="true"
dir="${project.build.directory}/surefire-reports"/>
@ -525,7 +532,19 @@
showStatusFailure="true"
showStatusIgnored="true"
showSuiteSummary="true"
timestamps="false"/>
timestamps="false">
<filtertrace>
<!-- custom filters: we carefully only omit test infra noise here -->
<containsstring contains=".SlaveMain." />
<containsregex pattern="^(\s+at )(org\.junit\.)" />
<!-- also includes anonymous classes inside these two: -->
<containsregex pattern="^(\s+at )(com\.carrotsearch\.randomizedtesting.RandomizedRunner)" />
<containsregex pattern="^(\s+at )(com\.carrotsearch\.randomizedtesting.ThreadLeakControl)" />
<containsregex pattern="^(\s+at )(com\.carrotsearch\.randomizedtesting.rules\.)" />
<containsregex pattern="^(\s+at )(org\.apache\.lucene.util\.TestRule)" />
<containsregex pattern="^(\s+at )(org\.apache\.lucene.util\.AbstractBeforeAfterRule)" />
</filtertrace>
</report-text>
<report-execution-times historyLength="20" file="${basedir}/${execution.hint.file}"/>
</listeners>
<assertions>
@ -561,7 +580,8 @@
<sysouts>${tests.verbose}</sysouts>
<seed>${tests.seed}</seed>
<haltOnFailure>${tests.failfast}</haltOnFailure>
<uniqueSuiteNames>false</uniqueSuiteNames>
<!-- enforce unique suite names, or reporting stuff can be screwed up -->
<uniqueSuiteNames>true</uniqueSuiteNames>
<systemProperties>
<!-- we use './temp' since this is per JVM and tests are forbidden from writing to CWD -->
<java.io.tmpdir>./temp</java.io.tmpdir>
@ -570,7 +590,6 @@
<tests.bwc.path>${tests.bwc.path}</tests.bwc.path>
<tests.bwc.version>${tests.bwc.version}</tests.bwc.version>
<tests.jvm.argline>${tests.jvm.argline}</tests.jvm.argline>
<tests.processors>${tests.processors}</tests.processors>
<tests.appendseed>${tests.appendseed}</tests.appendseed>
<tests.iters>${tests.iters}</tests.iters>
<tests.maxfailures>${tests.maxfailures}</tests.maxfailures>
@ -608,8 +627,7 @@
<tests.security.manager>${tests.security.manager}</tests.security.manager>
<tests.compatibility>${tests.compatibility}</tests.compatibility>
<java.awt.headless>true</java.awt.headless>
<!-- everything below is for security manager / test.policy -->
<junit4.tempDir>${project.build.directory}</junit4.tempDir>
<!-- security manager / test.policy -->
<java.security.policy>${basedir}/dev-tools/tests.policy</java.security.policy>
</systemProperties>
</configuration>
@ -667,6 +685,7 @@
<exclude name="**/org/elasticsearch/cluster/routing/shard_routes.txt"/>
<exclude name="target/**/*"/>
<exclude name=".metadata/**/*"/>
<exclude name=".idea/**/*"/>
<or>
<containsregexp expression="\bno(n|)commit\b" casesensitive="no"/>
<containsregexp expression="\t" casesensitive="no"/>
@ -1052,7 +1071,7 @@
<version>1.4</version>
<configuration>
<deb>${project.build.directory}/releases/${project.artifactId}-${project.version}.deb</deb>
<controlDir>${project.build.directory}/generated-packaging/deb/control</controlDir>
<controlDir>${project.build.directory}/generated-packaging/deb/scripts</controlDir>
</configuration>
<executions>
<execution>
@ -1062,31 +1081,44 @@
</goals>
<configuration>
<dataSet>
<!-- Add bin directory -->
<data>
<src>${project.basedir}/</src>
<includes>*.txt, *.textile</includes>
<excludes>LICENSE.txt, .DS_Store</excludes>
<type>directory</type>
<mapper>
<type>perm</type>
<prefix>${packaging.elasticsearch.home.dir}</prefix>
<user>root</user>
<group>root</group>
</mapper>
</data>
<data>
<!-- use the filtered one from the resources plugin -->
<src>${project.build.directory}/generated-packaging/deb/bin</src>
<type>directory</type>
<excludes>*.bat, .DS_Store, *.exe</excludes>
<includes>elasticsearch,elasticsearch.in.sh,plugin</includes>
<mapper>
<type>perm</type>
<prefix>${packaging.elasticsearch.home.dir}/bin</prefix>
<prefix>${packaging.elasticsearch.bin.dir}</prefix>
<filemode>755</filemode>
<user>root</user>
<group>root</group>
</mapper>
</data>
<!-- Add configuration files -->
<data>
<src>${project.basedir}/config</src>
<type>directory</type>
<excludes>.DS_Store</excludes>
<mapper>
<type>perm</type>
<prefix>${packaging.elasticsearch.conf.dir}</prefix>
<user>root</user>
<group>root</group>
</mapper>
</data>
<!-- Add environment vars file -->
<data>
<src>${project.build.directory}/generated-packaging/deb/env/elasticsearch</src>
<type>file</type>
<mapper>
<type>perm</type>
<prefix>/etc/default</prefix>
<filemode>644</filemode>
<user>root</user>
<group>root</group>
</mapper>
</data>
<!-- Add libs -->
<data>
<src>${project.build.directory}/</src>
<includes>${project.build.finalName}.jar</includes>
@ -1120,17 +1152,7 @@
<group>root</group>
</mapper>
</data>
<data>
<src>${project.build.directory}/generated-packaging/deb/default/</src>
<type>directory</type>
<excludes>.DS_Store</excludes>
<mapper>
<type>perm</type>
<prefix>/etc/default</prefix>
<user>root</user>
<group>root</group>
</mapper>
</data>
<!-- Add init.d files -->
<data>
<src>${project.build.directory}/generated-packaging/deb/init.d/</src>
<type>directory</type>
@ -1143,22 +1165,13 @@
<group>root</group>
</mapper>
</data>
<!-- Adds systemd file -->
<data>
<src>${project.build.directory}/generated-packaging/deb/systemd/elasticsearch.service</src>
<dst>/usr/lib/systemd/system/elasticsearch.service</dst>
<type>file</type>
</data>
<data>
<src>${project.basedir}/config</src>
<type>directory</type>
<excludes>.DS_Store</excludes>
<mapper>
<type>perm</type>
<prefix>${packaging.elasticsearch.conf.dir}</prefix>
<user>root</user>
<group>root</group>
</mapper>
</data>
<!-- Add lintian files -->
<data>
<src>${project.build.directory}/generated-packaging/deb/lintian</src>
<type>directory</type>
@ -1170,6 +1183,20 @@
<group>root</group>
</mapper>
</data>
<!-- Add readme files -->
<data>
<src>${project.basedir}/</src>
<includes>*.txt, *.textile</includes>
<excludes>LICENSE.txt, .DS_Store</excludes>
<type>directory</type>
<mapper>
<type>perm</type>
<prefix>${packaging.elasticsearch.home.dir}</prefix>
<user>root</user>
<group>root</group>
</mapper>
</data>
<!-- Add license files -->
<data>
<src>${project.build.directory}/generated-packaging/deb/copyright</src>
<dst>/usr/share/doc/elasticsearch/copyright</dst>
@ -1182,6 +1209,8 @@
<path>${packaging.elasticsearch.data.dir}</path>
<path>${packaging.elasticsearch.log.dir}</path>
<path>${packaging.elasticsearch.work.dir}</path>
<path>${packaging.elasticsearch.plugins.dir}</path>
<path>${packaging.elasticsearch.pid.dir}</path>
</paths>
<mapper>
<type>perm</type>
@ -1217,40 +1246,101 @@
<defaultUsername>root</defaultUsername>
<defaultGroupname>root</defaultGroupname>
<mappings>
<!-- Add bin directory -->
<mapping>
<directory>${packaging.elasticsearch.bin.dir}/</directory>
<filemode>755</filemode>
<sources>
<source>
<location>${project.build.directory}/generated-packaging/rpm/bin</location>
<includes>
<include>elasticsearch</include>
<include>elasticsearch.in.sh</include>
<include>plugin</include>
</includes>
</source>
</sources>
</mapping>
<!-- Add configuration files -->
<mapping>
<directory>${packaging.elasticsearch.conf.dir}/</directory>
<configuration>noreplace</configuration>
<sources>
<source>
<location>config/</location>
<location>${project.basedir}/config/</location>
<includes>
<include>*.yml</include>
</includes>
</source>
</sources>
</mapping>
<!-- Add environment vars file -->
<mapping>
<directory>/etc/sysconfig/</directory>
<directoryIncluded>false</directoryIncluded>
<configuration>noreplace</configuration>
<sources>
<source>
<location>${project.build.directory}/generated-packaging/rpm/sysconfig</location>
<location>${project.build.directory}/generated-packaging/rpm/env/</location>
<includes>
<include>elasticsearch</include>
</includes>
</source>
</sources>
</mapping>
<!-- Add libs -->
<mapping>
<directory>/etc/rc.d/init.d/</directory>
<directory>${packaging.elasticsearch.home.dir}/lib</directory>
<sources>
<source>
<location>target/lib/</location>
<includes>
<include>lucene*</include>
<include>*log4j*</include>
<include>jna*</include>
<include>spatial4j*</include>
<include>jts*</include>
<include>groovy*</include>
<include>antlr-runtime*</include>
<include>asm*</include>
</includes>
</source>
<source>
<location>${project.build.directory}/</location>
<includes>
<include>${project.build.finalName}.jar</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.home.dir}/lib/sigar</directory>
<sources>
<source>
<location>lib/sigar</location>
<includes>
<include>sigar*.jar</include>
<include>libsigar-*-linux.*</include>
</includes>
</source>
</sources>
</mapping>
<!-- Add init.d files -->
<mapping>
<directory>/etc/init.d</directory>
<directoryIncluded>false</directoryIncluded>
<filemode>755</filemode>
<configuration>true</configuration>
<sources>
<source>
<location>${project.build.directory}/generated-packaging/rpm/init.d/elasticsearch</location>
<location>${project.build.directory}/generated-packaging/rpm/init.d</location>
<includes>
<include>elasticsearch</include>
</includes>
</source>
</sources>
</mapping>
<!-- Adds systemd file -->
<mapping>
<directory>/usr/lib/systemd/system/</directory>
<filemode>755</filemode>
@ -1289,74 +1379,7 @@
</source>
</sources>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.work.dir}/</directory>
<filemode>755</filemode>
<username>${packaging.elasticsearch.user}</username>
<username>${packaging.elasticsearch.group}</username>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.data.dir}/</directory>
<filemode>755</filemode>
<username>${packaging.elasticsearch.user}</username>
<username>${packaging.elasticsearch.group}</username>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.log.dir}/</directory>
<filemode>755</filemode>
<username>${packaging.elasticsearch.user}</username>
<username>${packaging.elasticsearch.group}</username>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.home.dir}/bin/</directory>
<filemode>755</filemode>
<sources>
<source>
<location>${project.build.directory}/generated-packaging/rpm/bin</location>
<includes>
<include>elasticsearch</include>
<include>elasticsearch.in.sh</include>
<include>plugin</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.home.dir}/lib</directory>
<sources>
<source>
<location>target/lib/</location>
<includes>
<include>lucene*</include>
<include>*log4j*</include>
<include>jna*</include>
<include>spatial4j*</include>
<include>jts*</include>
<include>groovy*</include>
<include>antlr-runtime*</include>
<include>asm*</include>
</includes>
</source>
<source>
<location>${project.build.directory}/</location>
<includes>
<include>${project.build.finalName}.jar</include>
</includes>
</source>
</sources>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.home.dir}/lib/sigar</directory>
<sources>
<source>
<location>lib/sigar</location>
<includes>
<include>sigar*.jar</include>
<include>libsigar-*-linux.*</include>
</includes>
</source>
</sources>
</mapping>
<!-- Add readme files -->
<mapping>
<directory>${packaging.elasticsearch.home.dir}</directory>
<sources>
@ -1389,21 +1412,33 @@
<username>${packaging.elasticsearch.user}</username>
<groupname>${packaging.elasticsearch.group}</groupname>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.plugins.dir}</directory>
<filemode>755</filemode>
<username>${packaging.elasticsearch.user}</username>
<groupname>${packaging.elasticsearch.group}</groupname>
</mapping>
<mapping>
<directory>${packaging.elasticsearch.pid.dir}</directory>
<filemode>755</filemode>
<username>${packaging.elasticsearch.user}</username>
<groupname>${packaging.elasticsearch.group}</groupname>
</mapping>
</mappings>
<preinstallScriptlet>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/preinstall</scriptFile>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/preinst</scriptFile>
<fileEncoding>utf-8</fileEncoding>
</preinstallScriptlet>
<postinstallScriptlet>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/postinstall</scriptFile>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/postinst</scriptFile>
<fileEncoding>utf-8</fileEncoding>
</postinstallScriptlet>
<preremoveScriptlet>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/preremove</scriptFile>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/prerm</scriptFile>
<fileEncoding>utf-8</fileEncoding>
</preremoveScriptlet>
<postremoveScriptlet>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/postremove</scriptFile>
<scriptFile>${project.build.directory}/generated-packaging/rpm/scripts/postrm</scriptFile>
<fileEncoding>utf-8</fileEncoding>
</postremoveScriptlet>
</configuration>
@ -1625,6 +1660,7 @@
<version>2.9</version>
<configuration>
<buildOutputDirectory>eclipse-build</buildOutputDirectory>
<downloadSources>true</downloadSources>
</configuration>
</plugin>
</plugins>

View File

@ -65,7 +65,7 @@ skipped, and the reason why the tests are skipped. For instance:
....
"Parent":
- skip:
version: "0 - 0.90.2"
version: "0.20.1 - 0.90.2"
reason: Delete ignores the parent param
- do:
@ -75,14 +75,17 @@ skipped, and the reason why the tests are skipped. For instance:
All tests in the file following the skip statement should be skipped if:
`min <= current <= max`.
The `version` range should always have an upper bound. Versions should
either have each version part compared numerically, or should be converted
to a string with sufficient digits to allow string comparison, eg
The `version` range can leave either bound empty, which means "open ended".
For instance:
....
"Parent":
- skip:
version: "1.0.0.Beta1 - "
reason: Delete ignores the parent param
0.90.2 -> 000-090-002
Snapshot versions and versions of the form `1.0.0.Beta1` can be treated
as the rounded down version, eg `1.0.0`.
- do:
... test definitions ...
....
The skip section can also be used to list new features that need to be
supported in order to run a test. This way the up-to-date runners will

View File

@ -1,7 +1,7 @@
---
setup:
- skip:
version: 0 - 999
version: " - "
reason: leaves transient metadata behind, need to fix it
---
"Test put settings":

View File

@ -48,3 +48,18 @@ setup:
- length: { tokens: 2 }
- match: { tokens.0.token: Foo }
- match: { tokens.1.token: Bar! }
---
"JSON in Body":
- do:
indices.analyze:
body: { "text": "Foo Bar", "filters": ["lowercase"], "tokenizer": keyword }
- length: {tokens: 1 }
- match: { tokens.0.token: foo bar }
---
"Body params override query string":
- do:
indices.analyze:
text: Foo Bar
body: { "text": "Bar Foo", "filters": ["lowercase"], "tokenizer": keyword }
- length: {tokens: 1 }
- match: { tokens.0.token: bar foo }

View File

@ -12,7 +12,7 @@
indices.get_mapping:
index: test_index
- match: { test_index.mappings.type_1.properties: {}}
- match: { test_index.mappings.type_1: {}}
---
"Create index with settings":
@ -106,7 +106,7 @@
indices.get_mapping:
index: test_index
- match: { test_index.mappings.type_1.properties: {}}
- match: { test_index.mappings.type_1: {}}
- do:
indices.get_settings:

View File

@ -166,7 +166,7 @@ setup:
"Should return test_index_3 if expand_wildcards=closed":
- skip:
version: "0 - 2.0.0"
version: " - 2.0.0"
reason: Requires fix for issue 7258
- do:

View File

@ -202,7 +202,7 @@ setup:
"Getting alias on an non-existent index should return 404":
- skip:
version: 1 - 999
version: "1.0.0.Beta1 - "
reason: not implemented yet
- do:
catch: missing

View File

@ -21,10 +21,10 @@ setup:
- do:
indices.get_mapping: {}
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_3.properties: {}}
- match: { test_1.mappings.type_1: {}}
- match: { test_1.mappings.type_2: {}}
- match: { test_2.mappings.type_2: {}}
- match: { test_2.mappings.type_3: {}}
---
"Get /{index}/_mapping":
@ -33,8 +33,8 @@ setup:
indices.get_mapping:
index: test_1
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_1: {}}
- match: { test_1.mappings.type_2: {}}
- is_false: test_2
@ -46,8 +46,8 @@ setup:
index: test_1
type: _all
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_1: {}}
- match: { test_1.mappings.type_2: {}}
- is_false: test_2
---
@ -58,8 +58,8 @@ setup:
index: test_1
type: '*'
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_1: {}}
- match: { test_1.mappings.type_2: {}}
- is_false: test_2
---
@ -70,7 +70,7 @@ setup:
index: test_1
type: type_1
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_1: {}}
- is_false: test_1.mappings.type_2
- is_false: test_2
@ -82,8 +82,8 @@ setup:
index: test_1
type: type_1,type_2
- match: { test_1.mappings.type_1.properties: {}}
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_1: {}}
- match: { test_1.mappings.type_2: {}}
- is_false: test_2
---
@ -94,7 +94,7 @@ setup:
index: test_1
type: '*2'
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_2: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2
@ -105,8 +105,8 @@ setup:
indices.get_mapping:
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_2: {}}
- match: { test_2.mappings.type_2: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2.mappings.type_3
@ -118,8 +118,8 @@ setup:
index: _all
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_2: {}}
- match: { test_2.mappings.type_2: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2.mappings.type_3
@ -131,8 +131,8 @@ setup:
index: '*'
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_2: {}}
- match: { test_2.mappings.type_2: {}}
- is_false: test_1.mappings.type_1
- is_false: test_2.mappings.type_3
@ -144,8 +144,8 @@ setup:
index: test_1,test_2
type: type_2
- match: { test_1.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_1.mappings.type_2: {}}
- match: { test_2.mappings.type_2: {}}
- is_false: test_2.mappings.type_3
---
@ -156,6 +156,6 @@ setup:
index: '*2'
type: type_2
- match: { test_2.mappings.type_2.properties: {}}
- match: { test_2.mappings.type_2: {}}
- is_false: test_1
- is_false: test_2.mappings.type_3

View File

@ -56,8 +56,8 @@ setup:
indices.get_mapping:
index: test-x*
- match: { test-xxx.mappings.type_1.properties: {}}
- match: { test-xxy.mappings.type_2.properties: {}}
- match: { test-xxx.mappings.type_1: {}}
- match: { test-xxy.mappings.type_2: {}}
---
"Get test-* with wildcard_expansion=all":
@ -67,9 +67,9 @@ setup:
index: test-x*
expand_wildcards: all
- match: { test-xxx.mappings.type_1.properties: {}}
- match: { test-xxy.mappings.type_2.properties: {}}
- match: { test-xyy.mappings.type_3.properties: {}}
- match: { test-xxx.mappings.type_1: {}}
- match: { test-xxy.mappings.type_2: {}}
- match: { test-xyy.mappings.type_3: {}}
---
"Get test-* with wildcard_expansion=open":
@ -79,8 +79,8 @@ setup:
index: test-x*
expand_wildcards: open
- match: { test-xxx.mappings.type_1.properties: {}}
- match: { test-xxy.mappings.type_2.properties: {}}
- match: { test-xxx.mappings.type_1: {}}
- match: { test-xxy.mappings.type_2: {}}
---
"Get test-* with wildcard_expansion=closed":
@ -90,7 +90,7 @@ setup:
index: test-x*
expand_wildcards: closed
- match: { test-xyy.mappings.type_3.properties: {}}
- match: { test-xyy.mappings.type_3: {}}
---
"Get test-* with wildcard_expansion=none":
@ -110,8 +110,8 @@ setup:
index: test-x*
expand_wildcards: open,closed
- match: { test-xxx.mappings.type_1.properties: {}}
- match: { test-xxy.mappings.type_2.properties: {}}
- match: { test-xyy.mappings.type_3.properties: {}}
- match: { test-xxx.mappings.type_1: {}}
- match: { test-xxy.mappings.type_2: {}}
- match: { test-xyy.mappings.type_3: {}}

View File

@ -81,7 +81,7 @@ setup:
---
"put settings in list of indices":
- skip:
version: 1 - 999
version: " - "
reason: list of indices not implemented yet
- do:
indices.put_settings:

View File

@ -66,4 +66,6 @@ setup:
- is_true: indices.test2.total.docs
- is_true: indices.test2.total.docs
- is_true: indices.test2.shards
- is_true: indices.test1.shards.0.0.commit.id
- is_true: indices.test2.shards.0.0.commit.id

View File

@ -32,5 +32,5 @@
- is_true: valid
- match: {_shards.failed: 0}
- match: {explanations.0.index: 'testing'}
- match: {explanations.0.explanation: 'ConstantScore(*:*)'}
- match: {explanations.0.explanation: '*:*'}

View File

@ -112,8 +112,7 @@
- do:
scroll:
scroll_id: $scroll_id
scroll: 1m
body: { "scroll_id": "$scroll_id", "scroll": "1m"}
- match: {hits.total: 2 }
- length: {hits.hits: 1 }
@ -131,3 +130,63 @@
clear_scroll:
scroll_id: $scroll_id
---
"Body params override query string":
- do:
indices.create:
index: test_scroll
- do:
index:
index: test_scroll
type: test
id: 42
body: { foo: 1 }
- do:
index:
index: test_scroll
type: test
id: 43
body: { foo: 2 }
- do:
indices.refresh: {}
- do:
search:
index: test_scroll
size: 1
scroll: 1m
sort: foo
body:
query:
match_all: {}
- set: {_scroll_id: scroll_id}
- match: {hits.total: 2 }
- length: {hits.hits: 1 }
- match: {hits.hits.0._id: "42" }
- do:
index:
index: test_scroll
type: test
id: 44
body: { foo: 3 }
- do:
indices.refresh: {}
- do:
scroll:
scroll_id: invalid_scroll_id
body: { "scroll_id": "$scroll_id", "scroll": "1m"}
- match: {hits.total: 2 }
- length: {hits.hits: 1 }
- match: {hits.hits.0._id: "43" }
- do:
clear_scroll:
scroll_id: $scroll_id

View File

@ -37,3 +37,44 @@
catch: missing
clear_scroll:
scroll_id: $scroll_id1
---
"Body params override query string":
- do:
indices.create:
index: test_scroll
- do:
index:
index: test_scroll
type: test
id: 42
body: { foo: bar }
- do:
indices.refresh: {}
- do:
search:
index: test_scroll
search_type: scan
scroll: 1m
body:
query:
match_all: {}
- set: {_scroll_id: scroll_id1}
- do:
clear_scroll:
scroll_id: "invalid_scroll_id"
body: { "scroll_id": [ "$scroll_id1" ]}
- do:
catch: missing
scroll:
scroll_id: $scroll_id1
- do:
catch: missing
clear_scroll:
scroll_id: $scroll_id1

View File

@ -2,7 +2,7 @@
"Metadata Fields":
- skip:
version: "0 - 999"
version: " - "
reason: "Update doesn't return metadata fields, waiting for #3259"
- do:

View File

@ -22,25 +22,20 @@ package org.apache.lucene.search.vectorhighlight;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.FilterClause;
import org.apache.lucene.queries.TermFilter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQueryWrapperFilter;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.XBooleanFilter;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Collection;
import java.util.List;
@ -48,19 +43,9 @@ import java.util.List;
*
*/
// LUCENE MONITOR
// TODO: remove me!
public class CustomFieldQuery extends FieldQuery {
private static Field multiTermQueryWrapperFilterQueryField;
static {
try {
multiTermQueryWrapperFilterQueryField = MultiTermQueryWrapperFilter.class.getDeclaredField("query");
multiTermQueryWrapperFilterQueryField.setAccessible(true);
} catch (NoSuchFieldException e) {
// ignore
}
}
public static final ThreadLocal<Boolean> highlightFilters = new ThreadLocal<>();
public CustomFieldQuery(Query query, IndexReader reader, FastVectorHighlighter highlighter) throws IOException {
@ -140,25 +125,8 @@ public class CustomFieldQuery extends FieldQuery {
if (highlight == null || highlight.equals(Boolean.FALSE)) {
return;
}
if (sourceFilter instanceof TermFilter) {
// TermFilter is just a deprecated wrapper over QWF
TermQuery actualQuery = (TermQuery) ((TermFilter) sourceFilter).getQuery();
flatten(new TermQuery(actualQuery.getTerm()), reader, flatQueries);
} else if (sourceFilter instanceof MultiTermQueryWrapperFilter) {
if (multiTermQueryWrapperFilterQueryField != null) {
try {
flatten((Query) multiTermQueryWrapperFilterQueryField.get(sourceFilter), reader, flatQueries);
} catch (IllegalAccessException e) {
// ignore
}
}
} else if (sourceFilter instanceof XBooleanFilter) {
XBooleanFilter booleanFilter = (XBooleanFilter) sourceFilter;
for (FilterClause clause : booleanFilter.clauses()) {
if (clause.getOccur() == BooleanClause.Occur.MUST || clause.getOccur() == BooleanClause.Occur.SHOULD) {
flatten(clause.getFilter(), reader, flatQueries);
}
}
if (sourceFilter instanceof QueryWrapperFilter) {
flatten(((QueryWrapperFilter) sourceFilter).getQuery(), reader, flatQueries);
}
}
}

View File

@ -0,0 +1,197 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.spatial.prefix;
import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Shape;
import org.apache.lucene.search.Filter;
import org.apache.lucene.spatial.prefix.tree.Cell;
import org.apache.lucene.spatial.prefix.tree.CellIterator;
import org.apache.lucene.spatial.prefix.tree.LegacyCell;
import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree;
import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
* A {@link PrefixTreeStrategy} which uses {@link AbstractVisitingPrefixTreeFilter}.
* This strategy has support for searching non-point shapes (note: not tested).
* Even a query shape with distErrPct=0 (fully precise to the grid) should have
* good performance for typical data, unless there is a lot of indexed data
* coincident with the shape's edge.
*
* @lucene.experimental
*
* NOTE: Will be removed upon commit of LUCENE-6422
*/
public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy {
/* Future potential optimizations:
Each shape.relate(otherShape) result could be cached since much of the same relations will be invoked when
multiple segments are involved. Do this for "complex" shapes, not cheap ones, and don't cache when disjoint to
bbox because it's a cheap calc. This is one advantage TermQueryPrefixTreeStrategy has over RPT.
*/
protected int prefixGridScanLevel;
//Formerly known as simplifyIndexedCells. Eventually will be removed. Only compatible with RPT
// and a LegacyPrefixTree.
protected boolean pruneLeafyBranches = true;
protected boolean multiOverlappingIndexedShapes = true;
public RecursivePrefixTreeStrategy(SpatialPrefixTree grid, String fieldName) {
super(grid, fieldName);
prefixGridScanLevel = grid.getMaxLevels() - 4;//TODO this default constant is dependent on the prefix grid size
}
public int getPrefixGridScanLevel() {
return prefixGridScanLevel;
}
/**
* Sets the grid level [1-maxLevels] at which indexed terms are scanned brute-force
* instead of by grid decomposition. By default this is maxLevels - 4. The
* final level, maxLevels, is always scanned.
*
* @param prefixGridScanLevel 1 to maxLevels
*/
public void setPrefixGridScanLevel(int prefixGridScanLevel) {
//TODO if negative then subtract from maxlevels
this.prefixGridScanLevel = prefixGridScanLevel;
}
public boolean isMultiOverlappingIndexedShapes() {
return multiOverlappingIndexedShapes;
}
/** See {@link ContainsPrefixTreeFilter#multiOverlappingIndexedShapes}. */
public void setMultiOverlappingIndexedShapes(boolean multiOverlappingIndexedShapes) {
this.multiOverlappingIndexedShapes = multiOverlappingIndexedShapes;
}
public boolean isPruneLeafyBranches() {
return pruneLeafyBranches;
}
/** An optional hint affecting non-point shapes: it will
* simplify/aggregate sets of complete leaves in a cell to its parent, resulting in ~20-25%
* fewer indexed cells. However, it will likely be removed in the future. (default=true)
*/
public void setPruneLeafyBranches(boolean pruneLeafyBranches) {
this.pruneLeafyBranches = pruneLeafyBranches;
}
@Override
public String toString() {
StringBuilder str = new StringBuilder(getClass().getSimpleName()).append('(');
str.append("SPG:(").append(grid.toString()).append(')');
if (pointsOnly)
str.append(",pointsOnly");
if (pruneLeafyBranches)
str.append(",pruneLeafyBranches");
if (prefixGridScanLevel != grid.getMaxLevels() - 4)
str.append(",prefixGridScanLevel:").append(""+prefixGridScanLevel);
if (!multiOverlappingIndexedShapes)
str.append(",!multiOverlappingIndexedShapes");
return str.append(')').toString();
}
@Override
protected Iterator<Cell> createCellIteratorToIndex(Shape shape, int detailLevel, Iterator<Cell> reuse) {
if (shape instanceof Point || !pruneLeafyBranches || grid instanceof PackedQuadPrefixTree)
return super.createCellIteratorToIndex(shape, detailLevel, reuse);
List<Cell> cells = new ArrayList<>(4096);
recursiveTraverseAndPrune(grid.getWorldCell(), shape, detailLevel, cells);
return cells.iterator();
}
/** Returns true if cell was added as a leaf. If it wasn't it recursively descends. */
private boolean recursiveTraverseAndPrune(Cell cell, Shape shape, int detailLevel, List<Cell> result) {
// Important: this logic assumes Cells don't share anything with other cells when
// calling cell.getNextLevelCells(). This is only true for LegacyCell.
if (!(cell instanceof LegacyCell))
throw new IllegalStateException("pruneLeafyBranches must be disabled for use with grid "+grid);
if (cell.getLevel() == detailLevel) {
cell.setLeaf();//FYI might already be a leaf
}
if (cell.isLeaf()) {
result.add(cell);
return true;
}
if (cell.getLevel() != 0)
result.add(cell);
int leaves = 0;
CellIterator subCells = cell.getNextLevelCells(shape);
while (subCells.hasNext()) {
Cell subCell = subCells.next();
if (recursiveTraverseAndPrune(subCell, shape, detailLevel, result))
leaves++;
}
//can we prune?
if (leaves == ((LegacyCell)cell).getSubCellsSize() && cell.getLevel() != 0) {
//Optimization: substitute the parent as a leaf instead of adding all
// children as leaves
//remove the leaves
do {
result.remove(result.size() - 1);//remove last
} while (--leaves > 0);
//add cell as the leaf
cell.setLeaf();
return true;
}
return false;
}
@Override
public Filter makeFilter(SpatialArgs args) {
final SpatialOperation op = args.getOperation();
Shape shape = args.getShape();
int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct));
if (op == SpatialOperation.Intersects) {
return new IntersectsPrefixTreeFilter(
shape, getFieldName(), grid, detailLevel, prefixGridScanLevel);
} else if (op == SpatialOperation.IsWithin) {
return new WithinPrefixTreeFilter(
shape, getFieldName(), grid, detailLevel, prefixGridScanLevel,
-1);//-1 flag is slower but ensures correct results
} else if (op == SpatialOperation.Contains) {
return new ContainsPrefixTreeFilter(shape, getFieldName(), grid, detailLevel,
multiOverlappingIndexedShapes);
}
throw new UnsupportedSpatialOperation(op);
}
}
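Not part of this commit: a minimal usage sketch of the strategy above, assuming the upstream Lucene/spatial4j APIs of this era; the field name "location" and the 12-level quad tree are illustrative choices only.

import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.shape.Shape;
import org.apache.lucene.search.Filter;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialOperation;

public class RecursivePrefixTreeStrategyExample {
    public static Filter intersectsFilter(Shape queryShape) {
        SpatialContext ctx = SpatialContext.GEO;
        QuadPrefixTree grid = new QuadPrefixTree(ctx, 12);                        // 12 detail levels (illustrative)
        RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(grid, "location");
        strategy.setPruneLeafyBranches(true);                                     // default: aggregate complete leaf sets
        strategy.setPrefixGridScanLevel(grid.getMaxLevels() - 4);                 // default brute-force scan level
        return strategy.makeFilter(new SpatialArgs(SpatialOperation.Intersects, queryShape));
    }
}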

View File

@ -0,0 +1,81 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.spatial.prefix.tree;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* An Iterator of SpatialPrefixTree Cells. The order is always sorted without duplicates.
*
* @lucene.experimental
*
* NOTE: Will be removed upon commit of LUCENE-6422
*/
public abstract class CellIterator implements Iterator<Cell> {
//note: nextCell or thisCell can be non-null, but not both at the same time. That's
// because they might return the same instance when re-used!
protected Cell nextCell;//to be returned by next(), and null'ed after
protected Cell thisCell;//see next() & thisCell(). Should be cleared in hasNext().
/** Returns the cell last returned from {@link #next()}. It's cleared by hasNext(). */
public Cell thisCell() {
assert thisCell != null : "Only call thisCell() after next(), not hasNext()";
return thisCell;
}
// Arguably this belongs here and not on Cell
//public SpatialRelation getShapeRel()
/**
* Gets the next cell that is &gt;= {@code fromCell}, compared using non-leaf bytes. If it returns null then
* the iterator is exhausted.
*/
public Cell nextFrom(Cell fromCell) {
while (true) {
if (!hasNext())
return null;
Cell c = next();//will update thisCell
if (c.compareToNoLeaf(fromCell) >= 0) {
return c;
}
}
}
/** This prevents sub-cells (those underneath the current cell) from being iterated over,
* if applicable; otherwise it is a no-op. */
@Override
public void remove() {
assert thisCell != null;
}
@Override
public Cell next() {
if (nextCell == null) {
if (!hasNext())
throw new NoSuchElementException();
}
thisCell = nextCell;
nextCell = null;
return thisCell;
}
}
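Not part of this commit: an illustrative iteration sketch. hasNext() prepares the next cell and next() returns it, in sorted order without duplicates; the tree type and level counts below are arbitrary example values.

import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.shape.Shape;
import org.apache.lucene.spatial.prefix.tree.Cell;
import org.apache.lucene.spatial.prefix.tree.CellIterator;
import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree;

public class CellIteratorExample {
    /** Counts the leaf cells produced for a shape at the given detail level. */
    public static int countLeafCells(Shape shape, int detailLevel) {
        SpatialContext ctx = SpatialContext.GEO;
        PackedQuadPrefixTree tree = new PackedQuadPrefixTree(ctx, 16);   // 16 of a possible 29 levels
        CellIterator it = tree.getTreeCellIterator(shape, detailLevel);
        int leaves = 0;
        while (it.hasNext()) {
            Cell cell = it.next();
            if (cell.isLeaf()) {
                leaves++;
            }
        }
        return leaves;
    }
}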

View File

@ -0,0 +1,248 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.spatial.prefix.tree;
import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.SpatialRelation;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import java.util.Collection;
/** The base for the original two SPTs: Geohash and Quad. Don't subclass this for new SPTs.
* @lucene.internal
*
* NOTE: Will be removed upon commit of LUCENE-6422
*/
//public for RPT pruneLeafyBranches code
public abstract class LegacyCell implements Cell {
// Important: A LegacyCell doesn't share state for getNextLevelCells(), and
// LegacySpatialPrefixTree assumes this in its simplify tree logic.
private static final byte LEAF_BYTE = '+';//NOTE: must sort before letters & numbers
//Arguably we could simply use a BytesRef, using an extra Object.
protected byte[] bytes;//generally bigger to potentially hold a leaf
protected int b_off;
protected int b_len;//doesn't reflect leaf; same as getLevel()
protected boolean isLeaf;
/**
* When set via getSubCells(filter), it is the relationship between this cell
* and the given shape filter. Doesn't participate in shape equality.
*/
protected SpatialRelation shapeRel;
protected Shape shape;//cached
/** Warning: Refers to the same bytes (no copy). If {@link #setLeaf()} is subsequently called then it
* may modify bytes. */
protected LegacyCell(byte[] bytes, int off, int len) {
this.bytes = bytes;
this.b_off = off;
this.b_len = len;
readLeafAdjust();
}
protected void readCell(BytesRef bytes) {
shapeRel = null;
shape = null;
this.bytes = bytes.bytes;
this.b_off = bytes.offset;
this.b_len = (short) bytes.length;
readLeafAdjust();
}
protected void readLeafAdjust() {
isLeaf = (b_len > 0 && bytes[b_off + b_len - 1] == LEAF_BYTE);
if (isLeaf)
b_len--;
if (getLevel() == getMaxLevels())
isLeaf = true;
}
protected abstract SpatialPrefixTree getGrid();
protected abstract int getMaxLevels();
@Override
public SpatialRelation getShapeRel() {
return shapeRel;
}
@Override
public void setShapeRel(SpatialRelation rel) {
this.shapeRel = rel;
}
@Override
public boolean isLeaf() {
return isLeaf;
}
@Override
public void setLeaf() {
isLeaf = true;
}
@Override
public BytesRef getTokenBytesWithLeaf(BytesRef result) {
result = getTokenBytesNoLeaf(result);
if (!isLeaf || getLevel() == getMaxLevels())
return result;
if (result.bytes.length < result.offset + result.length + 1) {
assert false : "Not supposed to happen; performance bug";
byte[] copy = new byte[result.length + 1];
System.arraycopy(result.bytes, result.offset, copy, 0, result.length - 1);
result.bytes = copy;
result.offset = 0;
}
result.bytes[result.offset + result.length++] = LEAF_BYTE;
return result;
}
@Override
public BytesRef getTokenBytesNoLeaf(BytesRef result) {
if (result == null)
return new BytesRef(bytes, b_off, b_len);
result.bytes = bytes;
result.offset = b_off;
result.length = b_len;
return result;
}
@Override
public int getLevel() {
return b_len;
}
@Override
public CellIterator getNextLevelCells(Shape shapeFilter) {
assert getLevel() < getGrid().getMaxLevels();
if (shapeFilter instanceof Point) {
LegacyCell cell = getSubCell((Point) shapeFilter);
cell.shapeRel = SpatialRelation.CONTAINS;
return new SingletonCellIterator(cell);
} else {
return new FilterCellIterator(getSubCells().iterator(), shapeFilter);
}
}
/**
* Performant implementations are expected to implement this efficiently by
* considering the current cell's boundary.
* <p>
* Precondition: Never called when getLevel() == maxLevel.
* Precondition: this.getShape().relate(p) != DISJOINT.
*/
protected abstract LegacyCell getSubCell(Point p);
/**
* Gets the cells at the next grid cell level that cover this cell.
* Precondition: Never called when getLevel() == maxLevel.
*
* @return A set of cells (no dups), sorted, modifiable, not empty, not null.
*/
protected abstract Collection<Cell> getSubCells();
/**
* {@link #getSubCells()}.size() -- usually a constant. Should be &gt;=2
*/
public abstract int getSubCellsSize();
@Override
public boolean isPrefixOf(Cell c) {
//Note: this only works when each level uses a whole number of bytes.
LegacyCell cell = (LegacyCell)c;
boolean result = sliceEquals(cell.bytes, cell.b_off, cell.b_len, bytes, b_off, b_len);
assert result == StringHelper.startsWith(c.getTokenBytesNoLeaf(null), getTokenBytesNoLeaf(null));
return result;
}
/** Copied from {@link org.apache.lucene.util.StringHelper#startsWith(org.apache.lucene.util.BytesRef, org.apache.lucene.util.BytesRef)}
* which calls this. This is to avoid creating a BytesRef. */
private static boolean sliceEquals(byte[] sliceToTest_bytes, int sliceToTest_offset, int sliceToTest_length,
byte[] other_bytes, int other_offset, int other_length) {
if (sliceToTest_length < other_length) {
return false;
}
int i = sliceToTest_offset;
int j = other_offset;
final int k = other_offset + other_length;
while (j < k) {
if (sliceToTest_bytes[i++] != other_bytes[j++]) {
return false;
}
}
return true;
}
@Override
public int compareToNoLeaf(Cell fromCell) {
LegacyCell b = (LegacyCell) fromCell;
return compare(bytes, b_off, b_len, b.bytes, b.b_off, b.b_len);
}
/** Copied from {@link org.apache.lucene.util.BytesRef#compareTo(org.apache.lucene.util.BytesRef)}.
* This is to avoid creating a BytesRef. */
protected static int compare(byte[] aBytes, int aUpto, int a_length, byte[] bBytes, int bUpto, int b_length) {
final int aStop = aUpto + Math.min(a_length, b_length);
while(aUpto < aStop) {
int aByte = aBytes[aUpto++] & 0xff;
int bByte = bBytes[bUpto++] & 0xff;
int diff = aByte - bByte;
if (diff != 0) {
return diff;
}
}
// One is a prefix of the other, or, they are equal:
return a_length - b_length;
}
@Override
public boolean equals(Object obj) {
//this method isn't "normally" called; just in asserts/tests
if (obj instanceof Cell) {
Cell cell = (Cell) obj;
return getTokenBytesWithLeaf(null).equals(cell.getTokenBytesWithLeaf(null));
} else {
return false;
}
}
@Override
public int hashCode() {
return getTokenBytesWithLeaf(null).hashCode();
}
@Override
public String toString() {
//this method isn't "normally" called; just in asserts/tests
return getTokenBytesWithLeaf(null).utf8ToString();
}
}
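Not part of this commit: the two static helpers above compare raw byte slices to avoid allocating BytesRef wrappers, and the '+' leaf byte is chosen so it sorts before digits and letters. A standalone sketch of the same semantics on plain arrays:

public class ByteSlicePrefixExample {
    public static void main(String[] args) {
        byte[] parent = "DRT".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
        byte[] child = "DRT5".getBytes(java.nio.charset.StandardCharsets.US_ASCII);

        boolean isPrefix = startsWith(child, parent);     // sliceEquals-style prefix test
        int order = compareUnsigned(parent, child);       // compare-style ordering: -1, the prefix sorts first
        boolean leafSortsFirst = '+' < '0' && '+' < 'A';  // why LEAF_BYTE is '+': it sorts before letters & numbers

        System.out.println("isPrefix=" + isPrefix + " order=" + order + " leafSortsFirst=" + leafSortsFirst);
    }

    /** Same idea as LegacyCell.sliceEquals: does the longer slice start with the prefix slice? */
    static boolean startsWith(byte[] longer, byte[] prefix) {
        if (longer.length < prefix.length) return false;
        for (int i = 0; i < prefix.length; i++) {
            if (longer[i] != prefix[i]) return false;
        }
        return true;
    }

    /** Same idea as LegacyCell.compare: unsigned byte order, shorter slice wins ties. */
    static int compareUnsigned(byte[] a, byte[] b) {
        int stop = Math.min(a.length, b.length);
        for (int i = 0; i < stop; i++) {
            int diff = (a[i] & 0xff) - (b[i] & 0xff);
            if (diff != 0) return diff;
        }
        return a.length - b.length;
    }
}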

View File

@ -0,0 +1,435 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.spatial.prefix.tree;
import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Rectangle;
import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.SpatialRelation;
import com.spatial4j.core.shape.impl.RectangleImpl;
import org.apache.lucene.util.BytesRef;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.NoSuchElementException;
/**
* Subclassing QuadPrefixTree, this {@link SpatialPrefixTree} uses the compact QuadCell encoding described in
* {@link PackedQuadCell}.
*
* @lucene.experimental
*
* NOTE: Will be removed upon commit of LUCENE-6422
*/
public class PackedQuadPrefixTree extends QuadPrefixTree {
public static final byte[] QUAD = new byte[] {0x00, 0x01, 0x02, 0x03};
public static final int MAX_LEVELS_POSSIBLE = 29;
private boolean leafyPrune = true;
public static class Factory extends QuadPrefixTree.Factory {
@Override
protected SpatialPrefixTree newSPT() {
if (maxLevels > MAX_LEVELS_POSSIBLE) {
throw new IllegalArgumentException("maxLevels " + maxLevels + " exceeds maximum value " + MAX_LEVELS_POSSIBLE);
}
return new PackedQuadPrefixTree(ctx, maxLevels);
}
}
public PackedQuadPrefixTree(SpatialContext ctx, int maxLevels) {
super(ctx, maxLevels);
}
@Override
public Cell getWorldCell() {
return new PackedQuadCell(0x0L);
}
@Override
public Cell getCell(Point p, int level) {
List<Cell> cells = new ArrayList<>(1);
build(xmid, ymid, 0, cells, 0x0L, ctx.makePoint(p.getX(),p.getY()), level);
return cells.get(0);//note cells could be longer if p on edge
}
protected void build(double x, double y, int level, List<Cell> matches, long term, Shape shape, int maxLevel) {
double w = levelW[level] / 2;
double h = levelH[level] / 2;
// Z-Order
// http://en.wikipedia.org/wiki/Z-order_%28curve%29
checkBattenberg(QUAD[0], x - w, y + h, level, matches, term, shape, maxLevel);
checkBattenberg(QUAD[1], x + w, y + h, level, matches, term, shape, maxLevel);
checkBattenberg(QUAD[2], x - w, y - h, level, matches, term, shape, maxLevel);
checkBattenberg(QUAD[3], x + w, y - h, level, matches, term, shape, maxLevel);
}
protected void checkBattenberg(byte quad, double cx, double cy, int level, List<Cell> matches,
long term, Shape shape, int maxLevel) {
// short-circuit if we find a match for the point (no need to continue recursion)
if (shape instanceof Point && !matches.isEmpty())
return;
double w = levelW[level] / 2;
double h = levelH[level] / 2;
SpatialRelation v = shape.relate(ctx.makeRectangle(cx - w, cx + w, cy - h, cy + h));
if (SpatialRelation.DISJOINT == v) {
return;
}
// set bits for next level
term |= (((long)(quad))<<(64-(++level<<1)));
// increment level
term = ((term>>>1)+1)<<1;
if (SpatialRelation.CONTAINS == v || (level >= maxLevel)) {
matches.add(new PackedQuadCell(term, v.transpose()));
} else {// SpatialRelation.WITHIN, SpatialRelation.INTERSECTS
build(cx, cy, level, matches, term, shape, maxLevel);
}
}
@Override
public Cell readCell(BytesRef term, Cell scratch) {
PackedQuadCell cell = (PackedQuadCell) scratch;
if (cell == null)
cell = (PackedQuadCell) getWorldCell();
cell.readCell(term);
return cell;
}
@Override
public CellIterator getTreeCellIterator(Shape shape, int detailLevel) {
return new PrefixTreeIterator(shape);
}
public void setPruneLeafyBranches( boolean pruneLeafyBranches ) {
this.leafyPrune = pruneLeafyBranches;
}
/**
* PackedQuadCell Binary Representation is as follows
* CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCDDDDDL
*
* Where C = Cell bits (2 per quad)
* D = Depth bits (5 with max of 29 levels)
* L = isLeaf bit
*/
public class PackedQuadCell extends QuadCell {
private long term;
PackedQuadCell(long term) {
super(null, 0, 0);
this.term = term;
this.b_off = 0;
this.bytes = longToByteArray(this.term);
this.b_len = 8;
readLeafAdjust();
}
PackedQuadCell(long term, SpatialRelation shapeRel) {
this(term);
this.shapeRel = shapeRel;
}
@Override
protected void readCell(BytesRef bytes) {
shapeRel = null;
shape = null;
this.bytes = bytes.bytes;
this.b_off = bytes.offset;
this.b_len = (short) bytes.length;
this.term = longFromByteArray(this.bytes, bytes.offset);
readLeafAdjust();
}
private final int getShiftForLevel(final int level) {
return 64 - (level<<1);
}
public boolean isEnd(final int level, final int shift) {
return (term != 0x0L && ((((0x1L<<(level<<1))-1)-(term>>>shift)) == 0x0L));
}
/**
* Gets the next cell in the tree without using recursion. The {@code descend} parameter requests traversal to the
* child nodes; setting it to false steps to the next sibling instead.
* Note: this complies with lexicographical ordering; once you've moved to the next sibling there is no backtracking.
*/
public PackedQuadCell nextCell(boolean descend) {
final int level = getLevel();
final int shift = getShiftForLevel(level);
// base case: can't go further
if ( (!descend && isEnd(level, shift)) || isEnd(maxLevels, getShiftForLevel(maxLevels))) {
return null;
}
long newTerm;
final boolean isLeaf = (term&0x1L)==0x1L;
// if descend requested && we're not at the maxLevel
if ((descend && !isLeaf && (level != maxLevels)) || level == 0) {
// simple case: increment level bits (next level)
newTerm = ((term>>>1)+0x1L)<<1;
} else { // we're not descending or we can't descend
newTerm = term + (0x1L<<shift);
// we're at the last sibling...force descend
if (((term>>>shift)&0x3L) == 0x3L) {
// adjust level for number popping up
newTerm = ((newTerm>>>1) - (Long.numberOfTrailingZeros(newTerm>>>shift)>>>1))<<1;
}
}
return new PackedQuadCell(newTerm);
}
@Override
protected void readLeafAdjust() {
isLeaf = ((0x1L)&term) == 0x1L;
if (getLevel() == getMaxLevels()) {
isLeaf = true;
}
}
@Override
public BytesRef getTokenBytesWithLeaf(BytesRef result) {
if (isLeaf) {
term |= 0x1L;
}
return getTokenBytesNoLeaf(result);
}
@Override
public BytesRef getTokenBytesNoLeaf(BytesRef result) {
if (result == null)
return new BytesRef(bytes, b_off, b_len);
result.bytes = longToByteArray(this.term);
result.offset = 0;
result.length = result.bytes.length;
return result;
}
@Override
public int compareToNoLeaf(Cell fromCell) {
PackedQuadCell b = (PackedQuadCell) fromCell;
final long thisTerm = (((0x1L)&term) == 0x1L) ? term-1 : term;
final long fromTerm = (((0x1L)&b.term) == 0x1L) ? b.term-1 : b.term;
final int result = compare(longToByteArray(thisTerm), 0, 8, longToByteArray(fromTerm), 0, 8);
return result;
}
@Override
public int getLevel() {
int l = (int)((term >>> 1)&0x1FL);
return l;
}
@Override
protected Collection<Cell> getSubCells() {
List<Cell> cells = new ArrayList<>(4);
PackedQuadCell pqc = (PackedQuadCell)(new PackedQuadCell(((term&0x1)==0x1) ? this.term-1 : this.term))
.nextCell(true);
cells.add(pqc);
cells.add((pqc = (PackedQuadCell) (pqc.nextCell(false))));
cells.add((pqc = (PackedQuadCell) (pqc.nextCell(false))));
cells.add(pqc.nextCell(false));
return cells;
}
@Override
protected QuadCell getSubCell(Point p) {
return (PackedQuadCell) PackedQuadPrefixTree.this.getCell(p, getLevel() + 1);//not performant!
}
@Override
public boolean isPrefixOf(Cell c) {
PackedQuadCell cell = (PackedQuadCell)c;
return (this.term==0x0L) ? true : isInternalPrefix(cell);
}
protected boolean isInternalPrefix(PackedQuadCell c) {
final int shift = 64 - (getLevel()<<1);
return ((term>>>shift)-(c.term>>>shift)) == 0x0L;
}
protected long concat(byte postfix) {
// extra leaf bit
return this.term | (((long)(postfix))<<((getMaxLevels()-getLevel()<<1)+6));
}
/**
* Constructs a bounding box shape out of the encoded cell
*/
@Override
protected Rectangle makeShape() {
double xmin = PackedQuadPrefixTree.this.xmin;
double ymin = PackedQuadPrefixTree.this.ymin;
int level = getLevel();
byte b;
for (short l=0, i=1; l<level; ++l, ++i) {
b = (byte) ((term>>>(64-(i<<1))) & 0x3L);
switch (b) {
case 0x00:
ymin += levelH[l];
break;
case 0x01:
xmin += levelW[l];
ymin += levelH[l];
break;
case 0x02:
break;//nothing really
case 0x03:
xmin += levelW[l];
break;
default:
throw new RuntimeException("unexpected quadrant");
}
}
double width, height;
if (level > 0) {
width = levelW[level - 1];
height = levelH[level - 1];
} else {
width = gridW;
height = gridH;
}
return new RectangleImpl(xmin, xmin + width, ymin, ymin + height, ctx);
}
private long fromBytes(byte b1, byte b2, byte b3, byte b4, byte b5, byte b6, byte b7, byte b8) {
return ((long)b1 & 255L) << 56 | ((long)b2 & 255L) << 48 | ((long)b3 & 255L) << 40
| ((long)b4 & 255L) << 32 | ((long)b5 & 255L) << 24 | ((long)b6 & 255L) << 16
| ((long)b7 & 255L) << 8 | (long)b8 & 255L;
}
private byte[] longToByteArray(long value) {
byte[] result = new byte[8];
for(int i = 7; i >= 0; --i) {
result[i] = (byte)((int)(value & 255L));
value >>= 8;
}
return result;
}
private long longFromByteArray(byte[] bytes, int ofs) {
assert bytes.length >= 8;
return fromBytes(bytes[0+ofs], bytes[1+ofs], bytes[2+ofs], bytes[3+ofs],
bytes[4+ofs], bytes[5+ofs], bytes[6+ofs], bytes[7+ofs]);
}
/**
* Used for debugging; returns the bits of the cell as a binary string.
*/
@Override
public String toString() {
String s = "";
for(int i = 0; i < Long.numberOfLeadingZeros(term); i++) {
s+='0';
}
if (term != 0)
s += Long.toBinaryString(term);
return s;
}
} // PackedQuadCell
protected class PrefixTreeIterator extends CellIterator {
private Shape shape;
private PackedQuadCell thisCell;
private PackedQuadCell nextCell;
private short leaves;
private short level;
private final short maxLevels;
private CellIterator pruneIter;
PrefixTreeIterator(Shape shape) {
this.shape = shape;
this.thisCell = ((PackedQuadCell)(getWorldCell())).nextCell(true);
this.maxLevels = (short)thisCell.getMaxLevels();
this.nextCell = null;
}
@Override
public boolean hasNext() {
if (nextCell != null) {
return true;
}
SpatialRelation rel;
// loop until we're at the end of the quad tree or we hit a relation
while (thisCell != null) {
rel = thisCell.getShape().relate(shape);
if (rel == SpatialRelation.DISJOINT) {
thisCell = thisCell.nextCell(false);
} else { // within || intersects || contains
thisCell.setShapeRel(rel);
nextCell = thisCell;
if (rel == SpatialRelation.WITHIN) {
thisCell.setLeaf();
thisCell = thisCell.nextCell(false);
} else { // intersects || contains
level = (short) (thisCell.getLevel());
if (level == maxLevels || pruned(rel)) {
thisCell.setLeaf();
if (shape instanceof Point) {
thisCell.setShapeRel(SpatialRelation.WITHIN);
thisCell = null;
} else {
thisCell = thisCell.nextCell(false);
}
break;
}
thisCell = thisCell.nextCell(true);
}
break;
}
}
return nextCell != null;
}
private boolean pruned(SpatialRelation rel) {
if (rel == SpatialRelation.INTERSECTS && leafyPrune && level == maxLevels-1) {
for (leaves=0, pruneIter=thisCell.getNextLevelCells(shape); pruneIter.hasNext(); pruneIter.next(), ++leaves);
return leaves == 4;
}
return false;
}
@Override
public Cell next() {
if (nextCell == null) {
if (!hasNext()) {
throw new NoSuchElementException();
}
}
// overriding since this implementation sets thisCell in hasNext
Cell temp = nextCell;
nextCell = null;
return temp;
}
@Override
public void remove() {
//no-op
}
}
}
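Not part of this commit: a small worked decoding of the packed layout documented on PackedQuadCell (two cell bits per level from the top, five depth bits, one leaf bit). The sample term is hypothetical.

public class PackedQuadCellDecodeExample {
    public static void main(String[] args) {
        // Hypothetical term: quadrants [1, 3], depth 2, leaf bit set.
        long term = (1L << 62) | (3L << 60) | (2L << 1) | 1L;

        int level = (int) ((term >>> 1) & 0x1FL);   // depth bits, as in PackedQuadCell.getLevel()
        boolean leaf = (term & 0x1L) == 0x1L;       // leaf bit, as in readLeafAdjust()

        StringBuilder path = new StringBuilder();
        for (int i = 1; i <= level; i++) {
            if (i > 1) path.append('.');
            path.append((term >>> (64 - (i << 1))) & 0x3L);  // two cell bits per level, as in makeShape()
        }
        System.out.println("level=" + level + " leaf=" + leaf + " path=" + path);  // level=2 leaf=true path=1.3
    }
}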

View File

@ -0,0 +1,313 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.spatial.prefix.tree;
import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Rectangle;
import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.SpatialRelation;
import org.apache.lucene.util.BytesRef;
import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
/**
* A {@link SpatialPrefixTree} which uses a
* <a href="http://en.wikipedia.org/wiki/Quadtree">quad tree</a> in which an
* indexed term will be generated for each cell, 'A', 'B', 'C', 'D'.
*
* @lucene.experimental
*
* NOTE: Will be removed upon commit of LUCENE-6422
*/
public class QuadPrefixTree extends LegacyPrefixTree {
/**
* Factory for creating {@link QuadPrefixTree} instances with useful defaults
*/
public static class Factory extends SpatialPrefixTreeFactory {
@Override
protected int getLevelForDistance(double degrees) {
QuadPrefixTree grid = new QuadPrefixTree(ctx, MAX_LEVELS_POSSIBLE);
return grid.getLevelForDistance(degrees);
}
@Override
protected SpatialPrefixTree newSPT() {
return new QuadPrefixTree(ctx,
maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE);
}
}
public static final int MAX_LEVELS_POSSIBLE = 50;//not really sure how big this should be
public static final int DEFAULT_MAX_LEVELS = 12;
protected final double xmin;
protected final double xmax;
protected final double ymin;
protected final double ymax;
protected final double xmid;
protected final double ymid;
protected final double gridW;
public final double gridH;
final double[] levelW;
final double[] levelH;
final int[] levelS; // side
final int[] levelN; // number
public QuadPrefixTree(
SpatialContext ctx, Rectangle bounds, int maxLevels) {
super(ctx, maxLevels);
this.xmin = bounds.getMinX();
this.xmax = bounds.getMaxX();
this.ymin = bounds.getMinY();
this.ymax = bounds.getMaxY();
levelW = new double[maxLevels];
levelH = new double[maxLevels];
levelS = new int[maxLevels];
levelN = new int[maxLevels];
gridW = xmax - xmin;
gridH = ymax - ymin;
this.xmid = xmin + gridW/2.0;
this.ymid = ymin + gridH/2.0;
levelW[0] = gridW/2.0;
levelH[0] = gridH/2.0;
levelS[0] = 2;
levelN[0] = 4;
for (int i = 1; i < levelW.length; i++) {
levelW[i] = levelW[i - 1] / 2.0;
levelH[i] = levelH[i - 1] / 2.0;
levelS[i] = levelS[i - 1] * 2;
levelN[i] = levelN[i - 1] * 4;
}
}
public QuadPrefixTree(SpatialContext ctx) {
this(ctx, DEFAULT_MAX_LEVELS);
}
public QuadPrefixTree(
SpatialContext ctx, int maxLevels) {
this(ctx, ctx.getWorldBounds(), maxLevels);
}
@Override
public Cell getWorldCell() {
return new QuadCell(BytesRef.EMPTY_BYTES, 0, 0);
}
public void printInfo(PrintStream out) {
NumberFormat nf = NumberFormat.getNumberInstance(Locale.ROOT);
nf.setMaximumFractionDigits(5);
nf.setMinimumFractionDigits(5);
nf.setMinimumIntegerDigits(3);
for (int i = 0; i < maxLevels; i++) {
out.println(i + "]\t" + nf.format(levelW[i]) + "\t" + nf.format(levelH[i]) + "\t" +
levelS[i] + "\t" + (levelS[i] * levelS[i]));
}
}
@Override
public int getLevelForDistance(double dist) {
if (dist == 0)//short circuit
return maxLevels;
for (int i = 0; i < maxLevels-1; i++) {
//note: level[i] is actually a lookup for level i+1
if(dist > levelW[i] && dist > levelH[i]) {
return i+1;
}
}
return maxLevels;
}
@Override
public Cell getCell(Point p, int level) {
List<Cell> cells = new ArrayList<>(1);
build(xmid, ymid, 0, cells, new BytesRef(maxLevels+1), ctx.makePoint(p.getX(),p.getY()), level);
return cells.get(0);//note cells could be longer if p on edge
}
private void build(
double x,
double y,
int level,
List<Cell> matches,
BytesRef str,
Shape shape,
int maxLevel) {
assert str.length == level;
double w = levelW[level] / 2;
double h = levelH[level] / 2;
// Z-Order
// http://en.wikipedia.org/wiki/Z-order_%28curve%29
checkBattenberg('A', x - w, y + h, level, matches, str, shape, maxLevel);
checkBattenberg('B', x + w, y + h, level, matches, str, shape, maxLevel);
checkBattenberg('C', x - w, y - h, level, matches, str, shape, maxLevel);
checkBattenberg('D', x + w, y - h, level, matches, str, shape, maxLevel);
// possibly consider hilbert curve
// http://en.wikipedia.org/wiki/Hilbert_curve
// http://blog.notdot.net/2009/11/Damn-Cool-Algorithms-Spatial-indexing-with-Quadtrees-and-Hilbert-Curves
// if we actually use the range property in the query, this could be useful
}
protected void checkBattenberg(
char c,
double cx,
double cy,
int level,
List<Cell> matches,
BytesRef str,
Shape shape,
int maxLevel) {
assert str.length == level;
assert str.offset == 0;
double w = levelW[level] / 2;
double h = levelH[level] / 2;
int strlen = str.length;
Rectangle rectangle = ctx.makeRectangle(cx - w, cx + w, cy - h, cy + h);
SpatialRelation v = shape.relate(rectangle);
if (SpatialRelation.CONTAINS == v) {
str.bytes[str.length++] = (byte)c;//append
//str.append(SpatialPrefixGrid.COVER);
matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose()));
} else if (SpatialRelation.DISJOINT == v) {
// nothing
} else { // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS
str.bytes[str.length++] = (byte)c;//append
int nextLevel = level+1;
if (nextLevel >= maxLevel) {
//str.append(SpatialPrefixGrid.INTERSECTS);
matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose()));
} else {
build(cx, cy, nextLevel, matches, str, shape, maxLevel);
}
}
str.length = strlen;
}
protected class QuadCell extends LegacyCell {
QuadCell(byte[] bytes, int off, int len) {
super(bytes, off, len);
}
QuadCell(BytesRef str, SpatialRelation shapeRel) {
this(str.bytes, str.offset, str.length);
this.shapeRel = shapeRel;
}
@Override
protected QuadPrefixTree getGrid() { return QuadPrefixTree.this; }
@Override
protected int getMaxLevels() { return maxLevels; }
@Override
protected Collection<Cell> getSubCells() {
BytesRef source = getTokenBytesNoLeaf(null);
List<Cell> cells = new ArrayList<>(4);
cells.add(new QuadCell(concat(source, (byte)'A'), null));
cells.add(new QuadCell(concat(source, (byte)'B'), null));
cells.add(new QuadCell(concat(source, (byte)'C'), null));
cells.add(new QuadCell(concat(source, (byte)'D'), null));
return cells;
}
protected BytesRef concat(BytesRef source, byte b) {
//+2 for new char + potential leaf
final byte[] buffer = Arrays.copyOfRange(source.bytes, source.offset, source.offset + source.length + 2);
BytesRef target = new BytesRef(buffer);
target.length = source.length;
target.bytes[target.length++] = b;
return target;
}
@Override
public int getSubCellsSize() {
return 4;
}
@Override
protected QuadCell getSubCell(Point p) {
return (QuadCell) QuadPrefixTree.this.getCell(p, getLevel() + 1);//not performant!
}
@Override
public Shape getShape() {
if (shape == null)
shape = makeShape();
return shape;
}
protected Rectangle makeShape() {
BytesRef token = getTokenBytesNoLeaf(null);
double xmin = QuadPrefixTree.this.xmin;
double ymin = QuadPrefixTree.this.ymin;
for (int i = 0; i < token.length; i++) {
byte c = token.bytes[token.offset + i];
switch (c) {
case 'A':
ymin += levelH[i];
break;
case 'B':
xmin += levelW[i];
ymin += levelH[i];
break;
case 'C':
break;//nothing really
case 'D':
xmin += levelW[i];
break;
default:
throw new RuntimeException("unexpected char: " + c);
}
}
int len = token.length;
double width, height;
if (len > 0) {
width = levelW[len-1];
height = levelH[len-1];
} else {
width = gridW;
height = gridH;
}
return ctx.makeRectangle(xmin, xmin + width, ymin, ymin + height);
}
}//QuadCell
}
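Not part of this commit: a short sketch of how the tree maps a distance tolerance to a detail level and a point to a quadrant token; the coordinates are illustrative and the spatial4j GEO context is assumed.

import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.shape.Point;
import org.apache.lucene.spatial.prefix.tree.Cell;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;

public class QuadPrefixTreeExample {
    public static void main(String[] args) {
        SpatialContext ctx = SpatialContext.GEO;   // world bounds -180..180 x -90..90
        QuadPrefixTree grid = new QuadPrefixTree(ctx, QuadPrefixTree.DEFAULT_MAX_LEVELS);

        int level = grid.getLevelForDistance(1.0); // -> 9 here: 180/2^8 ~= 0.70 degrees < 1 degree
        Point p = ctx.makePoint(-122.0, 37.4);     // x = longitude, y = latitude (illustrative)
        Cell cell = grid.getCell(p, level);
        String token = cell.getTokenBytesNoLeaf(null).utf8ToString();  // a string of quadrant letters 'A'-'D'

        System.out.println("level=" + level + " token=" + token);
        grid.printInfo(System.out);                // per-level cell width/height and grid side length
    }
}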

View File

@ -485,7 +485,7 @@ public class Version {
}
String[] parts = version.split("\\.");
if (parts.length < 3 || parts.length > 4) {
throw new IllegalArgumentException("the version needs to contain major, minor and revision, and optionally the build");
throw new IllegalArgumentException("the version needs to contain major, minor and revision, and optionally the build: " + version);
}
try {

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportResponse;
/**
* A simple base class for action response listeners, defaulting to using the SAME executor (as it's
* very common on response handlers).
*/
public abstract class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> {
private final ActionListener<Response> listener;
public ActionListenerResponseHandler(ActionListener<Response> listener) {
this.listener = listener;
}
@Override
public void handleResponse(Response response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException e) {
listener.onFailure(e);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
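Not part of this commit: a hedged sketch of how a call site might use the new handler, assuming this era's TransportService.sendRequest and TransportResponseHandler#newInstance contract; the node, action name, and ExampleRequest/ExampleResponse types are hypothetical placeholders.

// ExampleRequest and ExampleResponse are hypothetical placeholder types.
void sendExample(TransportService transportService, DiscoveryNode node,
                 ExampleRequest request, ActionListener<ExampleResponse> listener) {
    transportService.sendRequest(node, "cluster/example/action", request,
            new ActionListenerResponseHandler<ExampleResponse>(listener) {
                @Override
                public ExampleResponse newInstance() {
                    return new ExampleResponse();  // how to materialize the streamed response
                }
            });
}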

View File

@ -1,40 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchWrapperException;
import org.elasticsearch.common.Nullable;
public class WriteFailureException extends ElasticsearchException implements ElasticsearchWrapperException {
@Nullable
private final String mappingTypeToUpdate;
public WriteFailureException(Throwable cause, String mappingTypeToUpdate) {
super(null, cause);
assert cause != null;
this.mappingTypeToUpdate = mappingTypeToUpdate;
}
public String getMappingTypeToUpdate() {
return mappingTypeToUpdate;
}
}

View File

@ -67,7 +67,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesOperationActio
}
@Override
protected NodesHotThreadsRequest newRequest() {
protected NodesHotThreadsRequest newRequestInstance() {
return new NodesHotThreadsRequest();
}

View File

@ -71,7 +71,7 @@ public class TransportNodesInfoAction extends TransportNodesOperationAction<Node
}
@Override
protected NodesInfoRequest newRequest() {
protected NodesInfoRequest newRequestInstance() {
return new NodesInfoRequest();
}

View File

@ -71,7 +71,7 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction<Nod
}
@Override
protected NodesStatsRequest newRequest() {
protected NodesStatsRequest newRequestInstance() {
return new NodesStatsRequest();
}

View File

@ -70,7 +70,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesOperationAction
}
@Override
protected Request newRequest() {
protected Request newRequestInstance() {
return new Request();
}

View File

@ -91,7 +91,7 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction<C
}
@Override
protected ClusterStatsRequest newRequest() {
protected ClusterStatsRequest newRequestInstance() {
return new ClusterStatsRequest();
}

View File

@ -53,29 +53,23 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
}
/**
* Constructs a new analyzer request for the provided text.
* Constructs a new analyzer request for the provided index.
*
* @param text The text to analyze
* @param index The index name to run the analysis against
*/
public AnalyzeRequest(String text) {
this.text = text;
}
/**
* Constructs a new analyzer request for the provided index and text.
*
* @param index The index name
* @param text The text to analyze
*/
public AnalyzeRequest(@Nullable String index, String text) {
public AnalyzeRequest(String index) {
this.index(index);
this.text = text;
}
public String text() {
return this.text;
}
public AnalyzeRequest text(String text) {
this.text = text;
return this;
}
public AnalyzeRequest analyzer(String analyzer) {
this.analyzer = analyzer;
return this;

View File

@ -32,7 +32,7 @@ public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder<A
}
public AnalyzeRequestBuilder(IndicesAdminClient indicesClient, String index, String text) {
super(indicesClient, new AnalyzeRequest(index, text));
super(indicesClient, new AnalyzeRequest(index).text(text));
}
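Not part of this commit: with this change the text moves out of the AnalyzeRequest constructor into the fluent text(...) setter, as in this illustrative call (index name, text, and analyzer are placeholders):

AnalyzeRequest request = new AnalyzeRequest("my_index")
        .text("quick brown fox")
        .analyzer("standard");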
/**

View File

@ -69,7 +69,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
}
@Override
protected ClearIndicesCacheRequest newRequest() {
protected ClearIndicesCacheRequest newRequestInstance() {
return new ClearIndicesCacheRequest();
}

View File

@ -62,7 +62,7 @@ public class TransportFlushAction extends TransportBroadcastOperationAction<Flus
}
@Override
protected FlushRequest newRequest() {
protected FlushRequest newRequestInstance() {
return new FlushRequest();
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.mapping.get;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
import org.elasticsearch.action.support.ActionFilters;
@ -187,7 +188,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO
} else if (Regex.isSimpleMatchPattern(field)) {
// go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name.
// also make sure we only store each mapper once.
Collection<FieldMapper<?>> remainingFieldMappers = new LinkedList<>(allFieldMappers);
Collection<FieldMapper<?>> remainingFieldMappers = Lists.newLinkedList(allFieldMappers);
for (Iterator<FieldMapper<?>> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
final FieldMapper<?> fieldMapper = it.next();
if (Regex.simpleMatch(field, fieldMapper.names().fullName())) {

View File

@ -63,7 +63,7 @@ public class TransportOptimizeAction extends TransportBroadcastOperationAction<O
}
@Override
protected OptimizeRequest newRequest() {
protected OptimizeRequest newRequestInstance() {
return new OptimizeRequest();
}

View File

@ -71,7 +71,7 @@ public class TransportRecoveryAction extends
}
@Override
protected RecoveryRequest newRequest() {
protected RecoveryRequest newRequestInstance() {
return new RecoveryRequest();
}

View File

@ -63,7 +63,7 @@ public class TransportRefreshAction extends TransportBroadcastOperationAction<Re
}
@Override
protected RefreshRequest newRequest() {
protected RefreshRequest newRequestInstance() {
return new RefreshRequest();
}

View File

@ -33,6 +33,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.ShardId;
@ -41,6 +43,7 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;
@ -66,7 +69,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastOperationA
}
@Override
protected IndicesSegmentsRequest newRequest() {
protected IndicesSegmentsRequest newRequestInstance() {
return new IndicesSegmentsRequest();
}
@ -135,7 +138,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastOperationA
}
static class IndexShardSegmentRequest extends BroadcastShardOperationRequest {
final boolean verbose;
boolean verbose;
IndexShardSegmentRequest() {
verbose = false;
@ -145,5 +148,17 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastOperationA
super(shardId, request);
verbose = request.verbose();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(verbose);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
verbose = in.readBoolean();
}
}
}

View File

@ -21,11 +21,13 @@ package org.elasticsearch.action.admin.indices.stats;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.shard.IndexShard;
import java.io.IOException;
@ -38,7 +40,10 @@ public class ShardStats extends BroadcastShardOperationResponse implements ToXCo
private ShardRouting shardRouting;
CommonStats stats;
CommonStats commonStats;
@Nullable
CommitStats commitStats;
ShardStats() {
}
@ -46,7 +51,8 @@ public class ShardStats extends BroadcastShardOperationResponse implements ToXCo
public ShardStats(IndexShard indexShard, ShardRouting shardRouting, CommonStatsFlags flags) {
super(indexShard.shardId());
this.shardRouting = shardRouting;
this.stats = new CommonStats(indexShard, flags);
this.commonStats = new CommonStats(indexShard, flags);
this.commitStats = indexShard.commitStats();
}
/**
@ -57,7 +63,11 @@ public class ShardStats extends BroadcastShardOperationResponse implements ToXCo
}
public CommonStats getStats() {
return this.stats;
return this.commonStats;
}
public CommitStats getCommitStats() {
return this.commitStats;
}
public static ShardStats readShardStats(StreamInput in) throws IOException {
@ -70,14 +80,16 @@ public class ShardStats extends BroadcastShardOperationResponse implements ToXCo
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardRouting = readShardRoutingEntry(in);
stats = CommonStats.readCommonStats(in);
commonStats = CommonStats.readCommonStats(in);
commitStats = CommitStats.readOptionalCommitStatsFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardRouting.writeTo(out);
stats.writeTo(out);
commonStats.writeTo(out);
out.writeOptionalStreamable(commitStats);
}
@Override
@ -89,7 +101,10 @@ public class ShardStats extends BroadcastShardOperationResponse implements ToXCo
.field(Fields.RELOCATING_NODE, shardRouting.relocatingNodeId())
.endObject();
stats.toXContent(builder, params);
commonStats.toXContent(builder, params);
if (commitStats != null) {
commitStats.toXContent(builder, params);
}
return builder;
}

View File

@ -70,7 +70,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi
}
@Override
protected IndicesStatsRequest newRequest() {
protected IndicesStatsRequest newRequestInstance() {
return new IndicesStatsRequest();
}

View File

@ -37,7 +37,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.MatchNoDocsFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
@ -96,7 +95,7 @@ public class TransportValidateQueryAction extends TransportBroadcastOperationAct
}
@Override
protected ValidateQueryRequest newRequest() {
protected ValidateQueryRequest newRequestInstance() {
return new ValidateQueryRequest();
}
@ -219,7 +218,7 @@ public class TransportValidateQueryAction extends TransportBroadcastOperationAct
private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException {
Query queryRewrite = searcher.rewrite(query);
if (queryRewrite instanceof MatchNoDocsQuery || queryRewrite instanceof MatchNoDocsFilter) {
if (queryRewrite instanceof MatchNoDocsQuery) {
return query.toString();
} else {
return queryRewrite.toString();

View File

@ -19,14 +19,12 @@
package org.elasticsearch.action.bulk;
import com.google.common.collect.Sets;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionWriteResponse;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.WriteFailureException;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
@ -44,26 +42,27 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.river.RiverIndexName;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import java.util.Map;
import java.util.Set;
/**
* Performs the index operation.
@ -134,7 +133,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
final BulkShardRequest request = shardRequest.request;
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
final Set<String> mappingTypesToUpdate = Sets.newHashSet();
long[] preVersions = new long[request.items().length];
VersionType[] preVersionTypes = new VersionType[request.items().length];
@ -145,20 +143,10 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
preVersions[requestIndex] = indexRequest.version();
preVersionTypes[requestIndex] = indexRequest.versionType();
try {
try {
WriteResult result = shardIndexOperation(request, indexRequest, clusterState, indexShard, true);
WriteResult result = shardIndexOperation(request, indexRequest, clusterState, indexShard, indexService, true);
// add the response
IndexResponse indexResponse = result.response();
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
if (result.mappingTypeToUpdate != null) {
mappingTypesToUpdate.add(result.mappingTypeToUpdate);
}
} catch (WriteFailureException e) {
if (e.getMappingTypeToUpdate() != null) {
mappingTypesToUpdate.add(e.getMappingTypeToUpdate());
}
throw e.getCause();
}
} catch (Throwable e) {
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
if (retryPrimaryException(e)) {
@ -166,12 +154,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
for (String mappingTypeToUpdate : mappingTypesToUpdate) {
DocumentMapper docMapper = indexService.mapperService().documentMapper(mappingTypeToUpdate);
if (docMapper != null) {
mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), docMapper, indexService.indexUUID());
}
}
throw (ElasticsearchException) e;
}
if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
@ -230,7 +212,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
UpdateResult updateResult;
try {
updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard);
updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard, indexService);
} catch (Throwable t) {
updateResult = new UpdateResult(null, null, false, t, null);
}
@ -250,9 +232,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
}
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
if (result.mappingTypeToUpdate != null) {
mappingTypesToUpdate.add(result.mappingTypeToUpdate);
}
break;
case DELETE:
DeleteResponse response = updateResult.writeResult.response();
@ -331,13 +310,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
assert preVersionTypes[requestIndex] != null;
}
for (String mappingTypToUpdate : mappingTypesToUpdate) {
DocumentMapper docMapper = indexService.mapperService().documentMapper(mappingTypToUpdate);
if (docMapper != null) {
mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), docMapper, indexService.indexUUID());
}
}
if (request.refresh()) {
try {
indexShard.refresh("refresh_flag_bulk");
@ -363,12 +335,10 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
static class WriteResult {
final ActionWriteResponse response;
final String mappingTypeToUpdate;
final Engine.IndexingOperation op;
WriteResult(ActionWriteResponse response, String mappingTypeToUpdate, Engine.IndexingOperation op) {
WriteResult(ActionWriteResponse response, Engine.IndexingOperation op) {
this.response = response;
this.mappingTypeToUpdate = mappingTypeToUpdate;
this.op = op;
}
@ -382,8 +352,25 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
}
private void applyMappingUpdate(IndexService indexService, String type, Mapping update) throws Throwable {
// HACK: Rivers seem to have something specific that triggers potential
// deadlocks when doing concurrent indexing. So for now they keep the
// old behaviour of updating mappings locally first and then
// asynchronously notifying the master
// this can go away when rivers are removed
final String indexName = indexService.index().name();
final String indexUUID = indexService.indexUUID();
if (indexName.equals(RiverIndexName.Conf.indexName(settings))) {
indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true);
mappingUpdatedAction.updateMappingOnMaster(indexName, indexUUID, type, update, null);
} else {
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexUUID, type, update);
indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true);
}
}
private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState,
IndexShard indexShard, boolean processed) {
IndexShard indexShard, IndexService indexService, boolean processed) throws Throwable {
// validate, if routing is required, that we got routing
MappingMetaData mappingMd = clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
@ -400,17 +387,13 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
.routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl());
// update mapping on master if needed, we won't update changes to the same type, since once its changed, it won't have mappers added
String mappingTypeToUpdate = null;
long version;
boolean created;
Engine.IndexingOperation op;
try {
if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates());
if (index.parsedDoc().mappingsModified()) {
mappingTypeToUpdate = indexRequest.type();
if (index.parsedDoc().dynamicMappingsUpdate() != null) {
applyMappingUpdate(indexService, indexRequest.type(), index.parsedDoc().dynamicMappingsUpdate());
}
indexShard.index(index);
version = index.version();
@ -419,8 +402,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
} else {
Engine.Create create = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY,
request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId());
if (create.parsedDoc().mappingsModified()) {
mappingTypeToUpdate = indexRequest.type();
if (create.parsedDoc().dynamicMappingsUpdate() != null) {
applyMappingUpdate(indexService, indexRequest.type(), create.parsedDoc().dynamicMappingsUpdate());
}
indexShard.create(create);
version = create.version();
@ -430,15 +413,12 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
// update the version on request so it will happen on the replicas
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.version(version);
} catch (Throwable t) {
throw new WriteFailureException(t, mappingTypeToUpdate);
}
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
IndexResponse indexResponse = new IndexResponse(request.index(), indexRequest.type(), indexRequest.id(), version, created);
return new WriteResult(indexResponse, mappingTypeToUpdate, op);
return new WriteResult(indexResponse, op);
}
private WriteResult shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) {
@ -451,7 +431,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
DeleteResponse deleteResponse = new DeleteResponse(request.index(), deleteRequest.type(), deleteRequest.id(), delete.version(), delete.found());
return new WriteResult(deleteResponse, null, null);
return new WriteResult(deleteResponse, null);
}
static class UpdateResult {
@ -507,14 +487,14 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
}
private UpdateResult shardUpdateOperation(ClusterState clusterState, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) {
private UpdateResult shardUpdateOperation(ClusterState clusterState, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard, IndexService indexService) {
UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard);
switch (translate.operation()) {
case UPSERT:
case INDEX:
IndexRequest indexRequest = translate.action();
try {
WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, false);
WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, indexService, false);
return new UpdateResult(translate, indexRequest, result);
} catch (Throwable t) {
t = ExceptionsHelper.unwrapCause(t);

View File

@ -95,7 +95,7 @@ public class TransportCountAction extends TransportBroadcastOperationAction<Coun
}
@Override
protected CountRequest newRequest() {
protected CountRequest newRequestInstance() {
return new CountRequest();
}

View File

@ -95,7 +95,7 @@ public class TransportExistsAction extends TransportBroadcastOperationAction<Exi
}
@Override
protected ExistsRequest newRequest() {
protected ExistsRequest newRequestInstance() {
return new ExistsRequest();
}

View File

@ -22,7 +22,6 @@ package org.elasticsearch.action.index;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.WriteFailureException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
@ -38,15 +37,17 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.river.RiverIndexName;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -166,6 +167,23 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
.indexShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing());
}
private void applyMappingUpdate(IndexService indexService, String type, Mapping update) throws Throwable {
// HACK: Rivers seem to have something specific that triggers potential
// deadlocks when doing concurrent indexing. So for now they keep the
// old behaviour of updating mappings locally first and then
// asynchronously notifying the master
// this can go away when rivers are removed
final String indexName = indexService.index().name();
final String indexUUID = indexService.indexUUID();
if (indexName.equals(RiverIndexName.Conf.indexName(settings))) {
indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true);
mappingUpdatedAction.updateMappingOnMaster(indexName, indexUUID, type, update, null);
} else {
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexUUID, type, update);
indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true);
}
}
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
final IndexRequest request = shardRequest.request;
@ -186,11 +204,10 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
long version;
boolean created;
try {
if (request.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates());
if (index.parsedDoc().mappingsModified()) {
mappingUpdatedAction.updateMappingOnMaster(shardRequest.shardId.getIndex(), index.docMapper(), indexService.indexUUID());
if (index.parsedDoc().dynamicMappingsUpdate() != null) {
applyMappingUpdate(indexService, request.type(), index.parsedDoc().dynamicMappingsUpdate());
}
indexShard.index(index);
version = index.version();
@ -198,8 +215,8 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
} else {
Engine.Create create = indexShard.prepareCreate(sourceToParse,
request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates(), request.autoGeneratedId());
if (create.parsedDoc().mappingsModified()) {
mappingUpdatedAction.updateMappingOnMaster(shardRequest.shardId.getIndex(), create.docMapper(), indexService.indexUUID());
if (create.parsedDoc().dynamicMappingsUpdate() != null) {
applyMappingUpdate(indexService, request.type(), create.parsedDoc().dynamicMappingsUpdate());
}
indexShard.create(create);
version = create.version();
@ -219,15 +236,6 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
assert request.versionType().validateVersionForWrites(request.version());
return new Tuple<>(new IndexResponse(shardRequest.shardId.getIndex(), request.type(), request.id(), version, created), shardRequest.request);
} catch (WriteFailureException e) {
if (e.getMappingTypeToUpdate() != null){
DocumentMapper docMapper = indexService.mapperService().documentMapper(e.getMappingTypeToUpdate());
if (docMapper != null) {
mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), docMapper, indexService.indexUUID());
}
}
throw e.getCause();
}
}
@Override

View File

@ -148,9 +148,9 @@ public class TransportMoreLikeThisAction extends HandledTransportAction<MoreLike
final Set<String> fields = newHashSet();
if (request.fields() != null) {
for (String field : request.fields()) {
FieldMappers fieldMappers = docMapper.mappers().smartName(field);
if (fieldMappers != null) {
fields.add(fieldMappers.mapper().names().indexName());
FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
if (fieldMapper != null) {
fields.add(fieldMapper.names().indexName());
} else {
fields.add(field);
}

View File

@ -101,7 +101,7 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction<
}
@Override
protected PercolateRequest newRequest() {
protected PercolateRequest newRequestInstance() {
return new PercolateRequest();
}

View File

@ -35,6 +35,7 @@ import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportRequestHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@ -105,10 +106,10 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
if (contexts.isEmpty()) {
for (final DiscoveryNode node : nodes) {
searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<Boolean>() {
searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<TransportResponse>() {
@Override
public void onResponse(Boolean freed) {
onFreedContext(freed);
public void onResponse(TransportResponse response) {
onFreedContext(true);
}
@Override
@ -126,10 +127,10 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
continue;
}
searchServiceTransportAction.sendFreeContext(node, target.v2(), request, new ActionListener<Boolean>() {
searchServiceTransportAction.sendFreeContext(node, target.v2(), request, new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
@Override
public void onResponse(Boolean freed) {
onFreedContext(freed);
public void onResponse(SearchServiceTransportAction.SearchFreeContextResponse freed) {
onFreedContext(freed.isFreed());
}
@Override

View File

@ -28,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResultProvider;
@ -67,7 +66,7 @@ public class TransportSearchCountAction extends TransportSearchTypeAction {
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<QuerySearchResultProvider> listener) {
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QuerySearchResultProvider> listener) {
searchService.sendExecuteQuery(node, request, listener);
}

View File

@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
@ -75,7 +74,7 @@ public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAc
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<DfsSearchResult> listener) {
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
searchService.sendExecuteDfs(node, request, listener);
}
@ -93,9 +92,9 @@ public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAc
}
void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
searchService.sendExecuteFetch(node, querySearchRequest, new SearchServiceListener<QueryFetchSearchResult>() {
searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
@Override
public void onResult(QueryFetchSearchResult result) {
public void onResponse(QueryFetchSearchResult result) {
result.shardTarget(dfsResult.shardTarget());
queryFetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {

View File

@ -34,7 +34,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
@ -85,7 +84,7 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<DfsSearchResult> listener) {
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
searchService.sendExecuteDfs(node, request, listener);
}
@ -102,9 +101,9 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
}
void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final QuerySearchRequest querySearchRequest, DiscoveryNode node) {
searchService.sendExecuteQuery(node, querySearchRequest, new SearchServiceListener<QuerySearchResult>() {
searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
@Override
public void onResult(QuerySearchResult result) {
public void onResponse(QuerySearchResult result) {
result.shardTarget(dfsResult.shardTarget());
queryResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {
@ -165,9 +164,9 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
}
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
@Override
public void onResult(FetchSearchResult result) {
public void onResponse(FetchSearchResult result) {
result.shardTarget(shardTarget);
fetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {

View File

@ -29,7 +29,6 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
@ -69,7 +68,7 @@ public class TransportSearchQueryAndFetchAction extends TransportSearchTypeActio
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<QueryFetchSearchResult> listener) {
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QueryFetchSearchResult> listener) {
searchService.sendExecuteFetch(node, request, listener);
}

View File

@ -33,7 +33,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
@ -79,7 +78,7 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<QuerySearchResultProvider> listener) {
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QuerySearchResultProvider> listener) {
searchService.sendExecuteQuery(node, request, listener);
}
@ -107,9 +106,9 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
}
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
@Override
public void onResult(FetchSearchResult result) {
public void onResponse(FetchSearchResult result) {
result.shardTarget(shardTarget);
fetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {

View File

@ -29,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResultProvider;
@ -65,7 +64,7 @@ public class TransportSearchScanAction extends TransportSearchTypeAction {
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<QuerySearchResult> listener) {
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QuerySearchResult> listener) {
searchService.sendExecuteScan(node, request, listener);
}

View File

@ -30,10 +30,10 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
@ -148,10 +148,10 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
searchService.sendExecuteFetch(node, internalRequest, new SearchServiceListener<QueryFetchSearchResult>() {
searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
@Override
public void onResult(QueryFetchSearchResult result) {
queryFetchResults.set(shardIndex, result);
public void onResponse(ScrollQueryFetchSearchResult result) {
queryFetchResults.set(shardIndex, result.result());
if (counter.decrementAndGet() == 0) {
finishHim();
}

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
@ -39,6 +38,7 @@ import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
@ -149,10 +149,10 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
searchService.sendExecuteQuery(node, internalRequest, new SearchServiceListener<QuerySearchResult>() {
searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
@Override
public void onResult(QuerySearchResult result) {
queryResults.set(shardIndex, result);
public void onResponse(ScrollQuerySearchResult result) {
queryResults.set(shardIndex, result.queryResult());
if (counter.decrementAndGet() == 0) {
try {
executeFetchPhase();
@ -207,9 +207,9 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
ShardFetchRequest shardFetchRequest = new ShardFetchRequest(request, querySearchResult.id(), docIds, lastEmittedDoc);
DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
searchService.sendExecuteFetchScroll(node, shardFetchRequest, new SearchServiceListener<FetchSearchResult>() {
searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
@Override
public void onResult(FetchSearchResult result) {
public void onResponse(FetchSearchResult result) {
result.shardTarget(querySearchResult.shardTarget());
fetchResults.set(entry.index, result);
if (counter.decrementAndGet() == 0) {

View File

@ -33,10 +33,10 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
@ -156,10 +156,10 @@ public class TransportSearchScrollScanAction extends AbstractComponent {
}
void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
searchService.sendExecuteScan(node, internalScrollSearchRequest(searchId, request), new SearchServiceListener<QueryFetchSearchResult>() {
searchService.sendExecuteScan(node, internalScrollSearchRequest(searchId, request), new ActionListener<ScrollQueryFetchSearchResult>() {
@Override
public void onResult(QueryFetchSearchResult result) {
queryFetchResults.set(shardIndex, result);
public void onResponse(ScrollQueryFetchSearchResult result) {
queryFetchResults.set(shardIndex, result.result());
if (counter.decrementAndGet() == 0) {
finishHim();
}

View File

@ -48,7 +48,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
@ -160,9 +159,9 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
String[] filteringAliases = clusterState.metaData().filteringAliases(shard.index(), request.indices());
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new SearchServiceListener<FirstResult>() {
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new ActionListener<FirstResult>() {
@Override
public void onResult(FirstResult result) {
public void onResponse(FirstResult result) {
onFirstPhaseResult(shardIndex, shard, result, shardIt);
}
@ -351,7 +350,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
}
}
protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, SearchServiceListener<FirstResult> listener);
protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<FirstResult> listener);
protected final void processFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result) {
firstResults.set(shardIndex, result);

View File

@ -79,7 +79,7 @@ public class TransportSuggestAction extends TransportBroadcastOperationAction<Su
}
@Override
protected SuggestRequest newRequest() {
protected SuggestRequest newRequestInstance() {
return new SuggestRequest();
}

View File

@ -36,7 +36,7 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp
* Subclasses implement this method to get a new instance of a Request object
* @return Request
*/
public abstract Request newRequestInstance();
protected abstract Request newRequestInstance();
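Now that the request factory is protected, each concrete action supplies it in the usual way. A minimal sketch, mirroring the newRequestInstance() overrides elsewhere in this commit (CountRequest is simply the type used by TransportCountAction above):
---------------------------------------------------------------------------
@Override
protected CountRequest newRequestInstance() {
    // handed to the transport layer so it can deserialize incoming requests
    return new CountRequest();
}
---------------------------------------------------------------------------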
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters){
super(settings, actionName, threadPool, actionFilters);
@ -48,7 +48,7 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp
});
}
private abstract class TransportHandler extends BaseTransportRequestHandler<Request>{
abstract class TransportHandler extends BaseTransportRequestHandler<Request>{
/**
* Call to get an instance of type Request

View File

@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterService;
@ -45,7 +46,7 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
*
*/
public abstract class TransportBroadcastOperationAction<Request extends BroadcastOperationRequest, Response extends BroadcastOperationResponse, ShardRequest extends BroadcastShardOperationRequest, ShardResponse extends BroadcastShardOperationResponse>
extends TransportAction<Request, Response> {
extends HandledTransportAction<Request, Response> {
protected final ThreadPool threadPool;
protected final ClusterService clusterService;
@ -55,14 +56,13 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
final String executor;
protected TransportBroadcastOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) {
super(settings, actionName, threadPool, actionFilters);
super(settings, actionName, threadPool, transportService, actionFilters);
this.clusterService = clusterService;
this.transportService = transportService;
this.threadPool = threadPool;
this.transportShardAction = actionName + "[s]";
this.executor = executor();
transportService.registerHandler(actionName, new TransportHandler());
transportService.registerHandler(transportShardAction, new ShardTransportHandler());
}
@ -73,8 +73,6 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
protected abstract String executor();
protected abstract Request newRequest();
protected abstract Response newResponse(Request request, AtomicReferenceArray shardsResponses, ClusterState clusterState);
protected abstract ShardRequest newShardRequest();
@ -162,18 +160,6 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
} else {
try {
final ShardRequest shardRequest = newShardRequest(shardIt.size(), shard, request);
if (shard.currentNodeId().equals(nodes.localNodeId())) {
threadPool.executor(executor).execute(new Runnable() {
@Override
public void run() {
try {
onOperation(shard, shardIndex, shardOperation(shardRequest));
} catch (Throwable e) {
onOperation(shard, shardIt, shardIndex, e);
}
}
});
} else {
DiscoveryNode node = nodes.get(shard.currentNodeId());
if (node == null) {
// no node connected, act as failure
@ -201,7 +187,6 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
}
});
}
}
} catch (Throwable e) {
onOperation(shard, shardIt, shardIndex, e);
}
@ -283,44 +268,6 @@ public abstract class TransportBroadcastOperationAction<Request extends Broadcas
}
}
class TransportHandler extends BaseTransportRequestHandler<Request> {
@Override
public Request newInstance() {
return newRequest();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void messageReceived(Request request, final TransportChannel channel) throws Exception {
// we just send back a response, no need to fork a listener
request.listenerThreaded(false);
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send response", e1);
}
}
});
}
}
class ShardTransportHandler extends BaseTransportRequestHandler<ShardRequest> {
@Override

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
@ -40,12 +41,10 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
/**
*
*/
public abstract class TransportNodesOperationAction<Request extends NodesOperationRequest, Response extends NodesOperationResponse, NodeRequest extends NodeOperationRequest, NodeResponse extends NodeOperationResponse> extends TransportAction<Request, Response> {
public abstract class TransportNodesOperationAction<Request extends NodesOperationRequest, Response extends NodesOperationResponse, NodeRequest extends NodeOperationRequest, NodeResponse extends NodeOperationResponse> extends HandledTransportAction<Request, Response> {
protected final ClusterName clusterName;
protected final ClusterService clusterService;
protected final TransportService transportService;
final String transportNodeAction;
@ -53,7 +52,7 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
protected TransportNodesOperationAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) {
super(settings, actionName, threadPool, actionFilters);
super(settings, actionName, threadPool, transportService, actionFilters);
this.clusterName = clusterName;
this.clusterService = clusterService;
this.transportService = transportService;
@ -61,7 +60,6 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
this.transportNodeAction = actionName + "[n]";
this.executor = executor();
transportService.registerHandler(actionName, new TransportHandler());
transportService.registerHandler(transportNodeAction, new NodeTransportHandler());
}
@ -76,8 +74,6 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
protected abstract String executor();
protected abstract Request newRequest();
protected abstract Response newResponse(Request request, AtomicReferenceArray nodesResponses);
protected abstract NodeRequest newNodeRequest();
@ -133,32 +129,12 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
final int idx = i;
final DiscoveryNode node = clusterState.nodes().nodes().get(nodeId);
try {
if (nodeId.equals("_local") || nodeId.equals(clusterState.nodes().localNodeId())) {
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
try {
onOperation(idx, nodeOperation(newNodeRequest(clusterState.nodes().localNodeId(), request)));
} catch (Throwable e) {
onFailure(idx, clusterState.nodes().localNodeId(), e);
}
}
});
} else if (nodeId.equals("_master")) {
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
try {
onOperation(idx, nodeOperation(newNodeRequest(clusterState.nodes().masterNodeId(), request)));
} catch (Throwable e) {
onFailure(idx, clusterState.nodes().masterNodeId(), e);
}
}
});
} else {
if (node == null) {
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
} else if (!clusterService.localNode().shouldConnectTo(node)) {
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
// those (and they randomize the client node usage, so tricky to find when)
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
} else {
NodeRequest nodeRequest = newNodeRequest(nodeId, request);
@ -184,7 +160,6 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
}
});
}
}
} catch (Throwable t) {
onFailure(idx, nodeId, t);
}
@ -223,49 +198,6 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
}
}
private class TransportHandler extends BaseTransportRequestHandler<Request> {
@Override
public Request newInstance() {
return newRequest();
}
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
request.listenerThreaded(false);
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
TransportResponseOptions options = TransportResponseOptions.options().withCompress(transportCompress());
try {
channel.sendResponse(response, options);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send response", e);
}
}
});
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public String toString() {
return actionName;
}
}
private class NodeTransportHandler extends BaseTransportRequestHandler<NodeRequest> {
@Override

View File

@ -104,9 +104,14 @@ final class TermVectorsWriter {
if (flags.contains(Flag.TermStatistics)) {
// get the doc frequency
if (dfs != null) {
writeTermStatistics(dfs.termStatistics().get(term));
final TermStatistics statistics = dfs.termStatistics().get(term);
writeTermStatistics(statistics == null ? new TermStatistics(termBytesRef, 0, 0) : statistics);
} else {
if (foundTerm) {
writeTermStatistics(topLevelIterator);
} else {
writeTermStatistics(new TermStatistics(termBytesRef, 0, 0));
}
}
}
if (useDocsAndPos) {

View File

@ -80,7 +80,7 @@ public class TransportDfsOnlyAction extends TransportBroadcastOperationAction<Df
}
@Override
protected DfsOnlyRequest newRequest() {
protected DfsOnlyRequest newRequestInstance() {
return new DfsOnlyRequest();
}

View File

@ -26,6 +26,8 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.jna.Natives;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@ -153,7 +155,7 @@ public class Bootstrap {
if (pidFile != null) {
try {
PidFile.create(Paths.get(pidFile), true);
PidFile.create(PathUtils.get(pidFile), true);
} catch (Exception e) {
String errorMessage = buildErrorMessage("pid", e);
sysError(errorMessage, true);

View File

@ -68,7 +68,7 @@ public class JVMCheck {
if (workAround != null) {
sb.append(System.lineSeparator());
sb.append("If you absolutely cannot upgrade, please add ").append(workAround);
sb.append(" to the JVM_OPTS environment variable.");
sb.append(" to the JAVA_OPTS environment variable.");
sb.append(System.lineSeparator());
sb.append("Upgrading is preferred, this workaround will result in degraded performance.");
}

View File

@ -19,9 +19,10 @@
package org.elasticsearch.cluster.action.index;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
@ -37,7 +38,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
@ -46,20 +46,20 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.TimeoutException;
/**
* Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated
@ -67,24 +67,23 @@ import java.util.concurrent.atomic.AtomicLong;
*/
public class MappingUpdatedAction extends TransportMasterNodeOperationAction<MappingUpdatedAction.MappingUpdatedRequest, MappingUpdatedAction.MappingUpdatedResponse> {
public static final String INDICES_MAPPING_ADDITIONAL_MAPPING_CHANGE_TIME = "indices.mapping.additional_mapping_change_time";
public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout";
public static final String ACTION_NAME = "internal:cluster/mapping_updated";
private final AtomicLong mappingUpdateOrderGen = new AtomicLong();
private final MetaDataMappingService metaDataMappingService;
private volatile MasterMappingUpdater masterMappingUpdater;
private volatile TimeValue additionalMappingChangeTime;
private volatile TimeValue dynamicMappingUpdateTimeout;
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
final TimeValue current = MappingUpdatedAction.this.additionalMappingChangeTime;
final TimeValue newValue = settings.getAsTime(INDICES_MAPPING_ADDITIONAL_MAPPING_CHANGE_TIME, current);
TimeValue current = MappingUpdatedAction.this.dynamicMappingUpdateTimeout;
TimeValue newValue = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, current);
if (!current.equals(newValue)) {
logger.info("updating " + INDICES_MAPPING_ADDITIONAL_MAPPING_CHANGE_TIME + " from [{}] to [{}]", current, newValue);
MappingUpdatedAction.this.additionalMappingChangeTime = newValue;
logger.info("updating " + INDICES_MAPPING_DYNAMIC_TIMEOUT + " from [{}] to [{}]", current, newValue);
MappingUpdatedAction.this.dynamicMappingUpdateTimeout = newValue;
}
}
}
@ -94,8 +93,7 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
MetaDataMappingService metaDataMappingService, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) {
super(settings, ACTION_NAME, transportService, clusterService, threadPool, actionFilters);
this.metaDataMappingService = metaDataMappingService;
// this setting should probably always be 0, just add the option to wait for more changes within a time window
this.additionalMappingChangeTime = settings.getAsTime(INDICES_MAPPING_ADDITIONAL_MAPPING_CHANGE_TIME, TimeValue.timeValueMillis(0));
this.dynamicMappingUpdateTimeout = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, TimeValue.timeValueSeconds(30));
nodeSettingsService.addListener(new ApplySettings());
}
@ -109,13 +107,58 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
this.masterMappingUpdater = null;
}
public void updateMappingOnMaster(String index, DocumentMapper documentMapper, String indexUUID) {
updateMappingOnMaster(index, documentMapper, indexUUID, null);
public void updateMappingOnMaster(String index, String indexUUID, String type, Mapping mappingUpdate, MappingUpdateListener listener) {
if (type.equals(MapperService.DEFAULT_MAPPING)) {
throw new ElasticsearchIllegalArgumentException("_default_ mapping should not be updated");
}
try {
XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
mappingUpdate.toXContent(builder, new ToXContent.MapParams(ImmutableMap.<String, String>of()));
final CompressedString mappingSource = new CompressedString(builder.endObject().bytes());
masterMappingUpdater.add(new MappingChange(index, indexUUID, type, mappingSource, listener));
} catch (IOException bogus) {
throw new AssertionError("Cannot happen", bogus);
}
}
public void updateMappingOnMaster(String index, DocumentMapper documentMapper, String indexUUID, MappingUpdateListener listener) {
assert !documentMapper.type().equals(MapperService.DEFAULT_MAPPING) : "_default_ mapping should not be updated";
masterMappingUpdater.add(new MappingChange(documentMapper, index, indexUUID, listener));
/**
* Same as {@link #updateMappingOnMasterSynchronously(String, String, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMasterSynchronously(String index, String indexUUID, String type, Mapping mappingUpdate) throws Throwable {
updateMappingOnMasterSynchronously(index, indexUUID, type, mappingUpdate, dynamicMappingUpdateTimeout);
}
/**
* Update mappings synchronously on the master node, waiting for at most
* {@code timeout}. When this method returns successfully mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMasterSynchronously(String index, String indexUUID, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable {
final CountDownLatch latch = new CountDownLatch(1);
final Throwable[] cause = new Throwable[1];
final MappingUpdateListener listener = new MappingUpdateListener() {
@Override
public void onMappingUpdate() {
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
cause[0] = t;
latch.countDown();
}
};
updateMappingOnMaster(index, indexUUID, type, mappingUpdate, listener);
if (!latch.await(timeout.getMillis(), TimeUnit.MILLISECONDS)) {
throw new TimeoutException("Time out while waiting for the master node to validate a mapping update for type [" + type + "]");
}
if (cause[0] != null) {
throw cause[0];
}
}
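For orientation, a hedged sketch of how a primary-shard write drives this synchronous variant; it mirrors the applyMappingUpdate helper added to TransportIndexAction earlier in this commit and assumes indexService, type, and the parsed document are in scope:
---------------------------------------------------------------------------
Mapping update = parsedDoc.dynamicMappingsUpdate();
if (update != null) {
    // blocks until the master has acknowledged the mapping change,
    // or times out after indices.mapping.dynamic_timeout (default 30s)
    mappingUpdatedAction.updateMappingOnMasterSynchronously(
            indexService.index().name(), indexService.indexUUID(), type, update);
    // only then apply the update locally
    indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true);
}
---------------------------------------------------------------------------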
@Override
@ -142,7 +185,7 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
@Override
protected void masterOperation(final MappingUpdatedRequest request, final ClusterState state, final ActionListener<MappingUpdatedResponse> listener) throws ElasticsearchException {
metaDataMappingService.updateMapping(request.index(), request.indexUUID(), request.type(), request.mappingSource(), request.order, request.nodeId, new ActionListener<ClusterStateUpdateResponse>() {
metaDataMappingService.updateMapping(request.index(), request.indexUUID(), request.type(), request.mappingSource(), request.nodeId, new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new MappingUpdatedResponse());
@ -174,18 +217,16 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
private String type;
private CompressedString mappingSource;
private long order = -1; // -1 means not set...
private String nodeId = null; // null means not set
MappingUpdatedRequest() {
}
public MappingUpdatedRequest(String index, String indexUUID, String type, CompressedString mappingSource, long order, String nodeId) {
public MappingUpdatedRequest(String index, String indexUUID, String type, CompressedString mappingSource, String nodeId) {
this.index = index;
this.indexUUID = indexUUID;
this.type = type;
this.mappingSource = mappingSource;
this.order = order;
this.nodeId = nodeId;
}
@ -215,13 +256,6 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
return mappingSource;
}
/**
* Returns -1 if not set...
*/
public long order() {
return this.order;
}
/**
* Returns null for not set.
*/
@ -241,7 +275,6 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
type = in.readString();
mappingSource = CompressedString.readCompressedString(in);
indexUUID = in.readString();
order = in.readLong();
nodeId = in.readOptionalString();
}
@ -252,7 +285,6 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
out.writeString(type);
mappingSource.writeTo(out);
out.writeString(indexUUID);
out.writeLong(order);
out.writeOptionalString(nodeId);
}
@ -263,15 +295,17 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
}
private static class MappingChange {
public final DocumentMapper documentMapper;
public final String index;
public final String indexUUID;
public final String type;
public final CompressedString mappingSource;
public final MappingUpdateListener listener;
MappingChange(DocumentMapper documentMapper, String index, String indexUUID, MappingUpdateListener listener) {
this.documentMapper = documentMapper;
MappingChange(String index, String indexUUID, String type, CompressedString mappingSource, MappingUpdateListener listener) {
this.index = index;
this.indexUUID = indexUUID;
this.type = type;
this.mappingSource = mappingSource;
this.listener = listener;
}
}
@ -313,113 +347,28 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
this.interrupt();
}
class UpdateKey {
public final String indexUUID;
public final String type;
UpdateKey(String indexUUID, String type) {
this.indexUUID = indexUUID;
this.type = type;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UpdateKey updateKey = (UpdateKey) o;
if (!indexUUID.equals(updateKey.indexUUID)) {
return false;
}
if (!type.equals(updateKey.type)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = indexUUID.hashCode();
result = 31 * result + type.hashCode();
return result;
}
}
class UpdateValue {
public final MappingChange mainChange;
public final List<MappingUpdateListener> listeners = Lists.newArrayList();
UpdateValue(MappingChange mainChange) {
this.mainChange = mainChange;
}
public void notifyListeners(@Nullable Throwable t) {
for (MappingUpdateListener listener : listeners) {
try {
if (t == null) {
listener.onMappingUpdate();
} else {
listener.onFailure(t);
}
} catch (Throwable lisFailure) {
logger.warn("unexpected failure on mapping update listener callback [{}]", lisFailure, listener);
}
}
}
}
@Override
public void run() {
Map<UpdateKey, UpdateValue> pendingUpdates = Maps.newHashMap();
while (running) {
MappingUpdateListener listener = null;
try {
MappingChange polledChange = queue.poll(10, TimeUnit.MINUTES);
if (polledChange == null) {
final MappingChange change = queue.poll(10, TimeUnit.MINUTES);
if (change == null) {
continue;
}
List<MappingChange> changes = Lists.newArrayList(polledChange);
if (additionalMappingChangeTime.millis() > 0) {
Thread.sleep(additionalMappingChangeTime.millis());
}
queue.drainTo(changes);
Collections.reverse(changes); // process then in newest one to oldest
// go over and add to pending updates map
for (MappingChange change : changes) {
UpdateKey key = new UpdateKey(change.indexUUID, change.documentMapper.type());
UpdateValue updateValue = pendingUpdates.get(key);
if (updateValue == null) {
updateValue = new UpdateValue(change);
pendingUpdates.put(key, updateValue);
}
if (change.listener != null) {
updateValue.listeners.add(change.listener);
}
}
for (Iterator<UpdateValue> iterator = pendingUpdates.values().iterator(); iterator.hasNext(); ) {
final UpdateValue updateValue = iterator.next();
iterator.remove();
MappingChange change = updateValue.mainChange;
listener = change.listener;
final MappingUpdatedAction.MappingUpdatedRequest mappingRequest;
try {
// we generate the order id before we get the mapping to send and refresh the source, so
// if 2 happen concurrently, we know that the later order will include the previous one
long orderId = mappingUpdateOrderGen.incrementAndGet();
change.documentMapper.refreshSource();
DiscoveryNode node = clusterService.localNode();
mappingRequest = new MappingUpdatedAction.MappingUpdatedRequest(
change.index, change.indexUUID, change.documentMapper.type(), change.documentMapper.mappingSource(), orderId, node != null ? node.id() : null
change.index, change.indexUUID, change.type, change.mappingSource, node != null ? node.id() : null
);
} catch (Throwable t) {
logger.warn("Failed to update master on updated mapping for index [" + change.index + "], type [" + change.documentMapper.type() + "]", t);
updateValue.notifyListeners(t);
logger.warn("Failed to update master on updated mapping for index [" + change.index + "], type [" + change.type + "]", t);
if (change.listener != null) {
change.listener.onFailure(t);
}
continue;
}
logger.trace("sending mapping updated to master: {}", mappingRequest);
@ -427,28 +376,30 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<Map
@Override
public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
logger.debug("successfully updated master with mapping update: {}", mappingRequest);
updateValue.notifyListeners(null);
if (change.listener != null) {
change.listener.onMappingUpdate();
}
}
@Override
public void onFailure(Throwable e) {
logger.warn("failed to update master on updated mapping for {}", e, mappingRequest);
updateValue.notifyListeners(e);
if (change.listener != null) {
change.listener.onFailure(e);
}
}
});
}
} catch (Throwable t) {
if (listener != null) {
// even if the failure is expected, eg. if we got interrupted,
// we need to notify the listener as there might be a latch
// waiting for it to be called
listener.onFailure(t);
}
if (t instanceof InterruptedException && !running) {
// all is well, we are shutting down
} else {
logger.warn("failed to process mapping updates", t);
}
// cleanup all pending update callbacks that were not processed due to a global failure...
for (Iterator<Map.Entry<UpdateKey, UpdateValue>> iterator = pendingUpdates.entrySet().iterator(); iterator.hasNext(); ) {
Map.Entry<UpdateKey, UpdateValue> entry = iterator.next();
iterator.remove();
entry.getValue().notifyListeners(t);
logger.warn("failed to process mapping update", t);
}
}
}

View File

@ -43,9 +43,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.*;
@ -57,7 +55,6 @@ import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlag
*/
public class MetaDataMappingService extends AbstractComponent {
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final IndicesService indicesService;
@ -68,9 +65,8 @@ public class MetaDataMappingService extends AbstractComponent {
private long refreshOrUpdateProcessedInsertOrder;
@Inject
public MetaDataMappingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService) {
public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.indicesService = indicesService;
}
@ -97,15 +93,13 @@ public class MetaDataMappingService extends AbstractComponent {
static class UpdateTask extends MappingTask {
final String type;
final CompressedString mappingSource;
final long order; // -1 for unknown
final String nodeId; // null for unknown
final ActionListener<ClusterStateUpdateResponse> listener;
UpdateTask(String index, String indexUUID, String type, CompressedString mappingSource, long order, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
UpdateTask(String index, String indexUUID, String type, CompressedString mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
super(index, indexUUID);
this.type = type;
this.mappingSource = mappingSource;
this.order = order;
this.nodeId = nodeId;
this.listener = listener;
}
@ -176,36 +170,8 @@ public class MetaDataMappingService extends AbstractComponent {
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
continue;
}
boolean add = true;
// if its an update task, make sure we only process the latest ordered one per node
if (task instanceof UpdateTask) {
UpdateTask uTask = (UpdateTask) task;
// we can only do something to compare if we have the order && node
if (uTask.order != -1 && uTask.nodeId != null) {
for (int i = 0; i < tasks.size(); i++) {
MappingTask existing = tasks.get(i);
if (existing instanceof UpdateTask) {
UpdateTask eTask = (UpdateTask) existing;
if (eTask.type.equals(uTask.type)) {
// if we have the order, and the node id, then we can compare, and replace if applicable
if (eTask.order != -1 && eTask.nodeId != null) {
if (eTask.nodeId.equals(uTask.nodeId) && uTask.order > eTask.order) {
// a newer update task, we can replace so we execute it one!
tasks.set(i, uTask);
add = false;
break;
}
}
}
}
}
}
}
if (add) {
tasks.add(task);
}
}
// construct the actual index if needed, and make sure the relevant mappings are there
boolean removeIndex = false;
@ -365,13 +331,13 @@ public class MetaDataMappingService extends AbstractComponent {
});
}
public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final long order, final String nodeId, final ActionListener<ClusterStateUpdateResponse> listener) {
public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final String nodeId, final ActionListener<ClusterStateUpdateResponse> listener) {
final long insertOrder;
synchronized (refreshOrUpdateMutex) {
insertOrder = ++refreshOrUpdateInsertOrder;
refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, order, nodeId, listener));
refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, nodeId, listener));
}
clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "], order [" + order + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() {
private volatile List<MappingTask> allTasks;
@Override
@ -398,7 +364,7 @@ public class MetaDataMappingService extends AbstractComponent {
try {
uTask.listener.onResponse(response);
} catch (Throwable t) {
logger.debug("failed ot ping back on response of mapping processing for task [{}]", t, uTask.listener);
logger.debug("failed to ping back on response of mapping processing for task [{}]", t, uTask.listener);
}
}
}
@ -457,7 +423,7 @@ public class MetaDataMappingService extends AbstractComponent {
newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), existingMapper == null);
if (existingMapper != null) {
// first, simulate
DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper, mergeFlags().simulate(true));
DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), mergeFlags().simulate(true));
// if we have conflicts, and we are not supposed to ignore them, throw an exception
if (!request.ignoreConflicts() && mergeResult.hasConflicts()) {
throw new MergeMappingException(mergeResult.conflicts());

View File

@ -68,7 +68,7 @@ public class ClusterDynamicSettingsModule extends AbstractModule {
clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_TYPE);
clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
clusterDynamicSettings.addDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
clusterDynamicSettings.addDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_ADDITIONAL_MAPPING_CHANGE_TIME, Validator.TIME);
clusterDynamicSettings.addDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME);
clusterDynamicSettings.addDynamicSetting(MetaData.SETTING_READ_ONLY);
clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.BYTES_SIZE);
clusterDynamicSettings.addDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER);

View File

@@ -208,7 +208,7 @@ public final class FileSystemUtils {
} else if (suffix != null) {
if (!isSameFile(file, path)) {
// If it already exists we try to copy this new version appending suffix to its name
path = Paths.get(path.toString().concat(suffix));
path = path.resolveSibling(path.getFileName().toString().concat(suffix));
// We just move the file to new dir but with a new name (appended with suffix)
Files.move(file, path, StandardCopyOption.REPLACE_EXISTING);
}
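// Illustrative note (assumption, not in the original source): resolveSibling keeps the new Path on
// the same filesystem provider as the original, e.g. a hypothetical "conf/logging.yml" with suffix
// ".new" becomes "conf/logging.yml.new", whereas Paths.get(String) always goes through the JDK
// default filesystem, which the new PathUtils helper is meant to replace.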
@@ -259,6 +259,8 @@ public final class FileSystemUtils {
}
}
// TODO: note that this will fail if source and target are on different NIO.2 filesystems.
static class TreeCopier extends SimpleFileVisitor<Path> {
private final Path source;
private final Path target;

View File

@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.io;
import org.elasticsearch.common.SuppressForbidden;
import java.net.URI;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* Utilities for creating a Path from names,
* or accessing the default FileSystem.
* <p>
* This class allows the default filesystem to
* be changed during tests.
*/
@SuppressForbidden(reason = "accesses the default filesystem by design")
public final class PathUtils {
/** no instantiation */
private PathUtils() {}
/** the actual JDK default */
static final FileSystem ACTUAL_DEFAULT = FileSystems.getDefault();
/** can be changed by tests (via reflection) */
private static volatile FileSystem DEFAULT = ACTUAL_DEFAULT;
/**
* Returns a {@code Path} from name components.
* <p>
* This works just like {@code Paths.get()}.
* Remember: just like {@code Paths.get()} this is NOT A STRING CONCATENATION
* UTILITY FUNCTION.
* <p>
* Remember: this should almost never be used. Usually resolve
* a path against an existing one!
*/
public static Path get(String first, String... more) {
return DEFAULT.getPath(first, more);
}
/**
* Returns a {@code Path} from a URI
* <p>
* This works just like {@code Paths.get()}.
* <p>
* Remember: this should almost never be used. Usually resolve
* a path against an existing one!
*/
public static Path get(URI uri) {
if (uri.getScheme().equalsIgnoreCase("file")) {
return DEFAULT.provider().getPath(uri);
} else {
return Paths.get(uri);
}
}
/**
* Returns the default FileSystem.
*/
public static FileSystem getDefaultFileSystem() {
return DEFAULT;
}
}
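// Usage sketch (illustrative only, not part of the original change; the directory and file names are
// hypothetical): prefer resolving against an existing Path over building new ones from strings.
//
//   Path home = PathUtils.get("/var/lib/elasticsearch");          // rare, only for root paths
//   Path config = home.resolve("config").resolve("logging.yml");  // preferred style
//
// Because get() goes through the swappable DEFAULT filesystem, tests can redirect such paths onto an
// in-memory filesystem via reflection.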

View File

@@ -20,16 +20,26 @@
package org.elasticsearch.common.lucene;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.*;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Filter;
@@ -43,8 +53,11 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.store.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.Version;
@@ -64,7 +77,11 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
import java.io.IOException;
import java.text.ParseException;
import java.util.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.common.lucene.search.NoopCollector.NOOP_COLLECTOR;
@@ -236,10 +253,7 @@ public class Lucene {
}
public static long count(IndexSearcher searcher, Query query) throws IOException {
TotalHitCountCollector countCollector = new TotalHitCountCollector();
query = wrapCountQuery(query);
searcher.search(query, countCollector);
return countCollector.getTotalHits();
return searcher.count(query);
}
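// For illustration (an assumption, not part of this change): IndexSearcher#count returns the same
// hit count as the collector-based approach removed above, i.e.
//
//   TotalHitCountCollector countCollector = new TotalHitCountCollector();
//   searcher.search(query, countCollector);
//   return countCollector.getTotalHits();
//
// but no longer needs the ConstantScoreQuery wrapping, since no scores are computed while counting.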
/**
@@ -313,7 +327,6 @@ public class Lucene {
*/
public static boolean countWithEarlyTermination(IndexSearcher searcher, Filter filter, Query query,
EarlyTerminatingCollector collector) throws IOException {
query = wrapCountQuery(query);
try {
if (filter == null) {
searcher.search(query, collector);
@@ -335,14 +348,6 @@ public class Lucene {
return createCountBasedEarlyTerminatingCollector(1);
}
private final static Query wrapCountQuery(Query query) {
// we don't need scores, so wrap it in a constant score query
if (!(query instanceof ConstantScoreQuery)) {
query = new ConstantScoreQuery(query);
}
return query;
}
/**
* Closes the index writer, returning <tt>false</tt> if it failed to close.
*/
@@ -554,6 +559,9 @@ public class Lucene {
out.writeBoolean(false);
}
out.writeFloat(explanation.getValue());
if (explanation.getDescription() == null) {
throw new ElasticsearchIllegalArgumentException("Explanation descriptions should NOT be null\n[" + explanation.toString() + "]");
}
out.writeString(explanation.getDescription());
Explanation[] subExplanations = explanation.getDetails();
if (subExplanations == null) {

View File

@@ -19,21 +19,18 @@
package org.elasticsearch.common.lucene.docset;
import com.google.common.collect.Iterables;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.lucene.search.XDocIdSetIterator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
@@ -93,7 +90,7 @@ public class AndDocIdSet extends DocIdSet {
return DocIdSetIterator.empty();
}
Bits bit = set.bits();
if (bit != null && DocIdSets.isBroken(it)) {
if (bit != null && bit instanceof BitSet == false) {
bits.add(bit);
} else {
iterators.add(it);
@@ -138,7 +135,7 @@ public class AndDocIdSet extends DocIdSet {
}
}
static class IteratorBasedIterator extends XDocIdSetIterator {
static class IteratorBasedIterator extends DocIdSetIterator {
private int doc = -1;
private final DocIdSetIterator lead;
private final DocIdSetIterator[] otherIterators;
@@ -174,16 +171,6 @@ public class AndDocIdSet extends DocIdSet {
this.otherIterators = Arrays.copyOfRange(sortedIterators, 1, sortedIterators.length);
}
@Override
public boolean isBroken() {
for (DocIdSetIterator it : Iterables.concat(Collections.singleton(lead), Arrays.asList(otherIterators))) {
if (DocIdSets.isBroken(it)) {
return true;
}
}
return false;
}
@Override
public final int docID() {
return doc;

View File

@@ -22,8 +22,6 @@ package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.DocValuesDocIdSet;
import org.apache.lucene.search.FilteredDocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
@@ -33,7 +31,6 @@ import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.search.XDocIdSetIterator;
import java.io.IOException;
@@ -55,31 +52,6 @@ public class DocIdSets {
return set == null || set == DocIdSet.EMPTY;
}
/**
* Check if the given iterator can nextDoc() or advance() in sub-linear time
* of the number of documents. For instance, an iterator that would need to
* iterate one document at a time to check for its value would be considered
* broken.
*/
public static boolean isBroken(DocIdSetIterator iterator) {
while (iterator instanceof FilteredDocIdSetIterator) {
// this iterator is filtered (likely by some bits)
// unwrap in order to check if the underlying iterator is fast
iterator = ((FilteredDocIdSetIterator) iterator).getDelegate();
}
if (iterator instanceof XDocIdSetIterator) {
return ((XDocIdSetIterator) iterator).isBroken();
}
if (iterator instanceof MatchDocIdSetIterator) {
return true;
}
// DocValuesDocIdSet produces anonymous slow iterators
if (iterator != null && DocValuesDocIdSet.class.equals(iterator.getClass().getEnclosingClass())) {
return true;
}
return false;
}
/**
* Converts to a cacheable {@link DocIdSet}
* <p/>

View File

@@ -1,181 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.RamUsageEstimator;
import java.io.IOException;
/**
* A {@link DocIdSet} that matches the "inverse" of the provided doc id set.
*/
public class NotDocIdSet extends DocIdSet {
private final DocIdSet set;
private final int maxDoc;
public NotDocIdSet(DocIdSet set, int maxDoc) {
this.maxDoc = maxDoc;
this.set = set;
}
@Override
public boolean isCacheable() {
return set.isCacheable();
}
@Override
public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_INT + set.ramBytesUsed();
}
@Override
public Bits bits() throws IOException {
Bits bits = set.bits();
if (bits == null) {
return null;
}
return new NotBits(bits);
}
@Override
public DocIdSetIterator iterator() throws IOException {
DocIdSetIterator it = set.iterator();
if (it == null) {
return new AllDocIdSet.Iterator(maxDoc);
}
// TODO: can we optimize for the FixedBitSet case?
// if we have bits, its much faster to just check on the flipped end potentially
// really depends on the nature of the Bits, specifically with FixedBitSet, where
// most of the docs are set?
Bits bits = set.bits();
if (bits != null) {
return new BitsBasedIterator(bits);
}
return new IteratorBasedIterator(maxDoc, it);
}
public static class NotBits implements Bits {
private final Bits bits;
public NotBits(Bits bits) {
this.bits = bits;
}
@Override
public boolean get(int index) {
return !bits.get(index);
}
@Override
public int length() {
return bits.length();
}
}
public static class BitsBasedIterator extends MatchDocIdSetIterator {
private final Bits bits;
public BitsBasedIterator(Bits bits) {
super(bits.length());
this.bits = bits;
}
@Override
protected boolean matchDoc(int doc) {
return !bits.get(doc);
}
@Override
public long cost() {
return bits.length();
}
}
public static class IteratorBasedIterator extends DocIdSetIterator {
private final int max;
private DocIdSetIterator it1;
private int lastReturn = -1;
private int innerDocid = -1;
private final long cost;
IteratorBasedIterator(int max, DocIdSetIterator it) throws IOException {
this.max = max;
this.it1 = it;
this.cost = it1.cost();
if ((innerDocid = it1.nextDoc()) == DocIdSetIterator.NO_MORE_DOCS) {
it1 = null;
}
}
@Override
public int docID() {
return lastReturn;
}
@Override
public int nextDoc() throws IOException {
return advance(0);
}
@Override
public int advance(int target) throws IOException {
if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) {
return DocIdSetIterator.NO_MORE_DOCS;
}
if (target <= lastReturn) target = lastReturn + 1;
if (it1 != null && innerDocid < target) {
if ((innerDocid = it1.advance(target)) == DocIdSetIterator.NO_MORE_DOCS) {
it1 = null;
}
}
while (it1 != null && innerDocid == target) {
target++;
if (target >= max) {
return (lastReturn = DocIdSetIterator.NO_MORE_DOCS);
}
if ((innerDocid = it1.advance(target)) == DocIdSetIterator.NO_MORE_DOCS) {
it1 = null;
}
}
// ADDED THIS, bug in original code
if (target >= max) {
return (lastReturn = DocIdSetIterator.NO_MORE_DOCS);
}
return (lastReturn = target);
}
@Override
public long cost() {
return cost;
}
}
}

View File

@@ -1,263 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.lucene.search.XDocIdSetIterator;
import java.io.IOException;
/**
*
*/
public class OrDocIdSet extends DocIdSet {
private final DocIdSet[] sets;
public OrDocIdSet(DocIdSet[] sets) {
this.sets = sets;
}
@Override
public boolean isCacheable() {
for (DocIdSet set : sets) {
if (!set.isCacheable()) {
return false;
}
}
return true;
}
@Override
public long ramBytesUsed() {
long ramBytesUsed = RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
for (DocIdSet set : sets) {
ramBytesUsed += RamUsageEstimator.NUM_BYTES_OBJECT_REF + set.ramBytesUsed();
}
return ramBytesUsed;
}
@Override
public Bits bits() throws IOException {
Bits[] bits = new Bits[sets.length];
for (int i = 0; i < sets.length; i++) {
bits[i] = sets[i].bits();
if (bits[i] == null) {
return null;
}
}
return new OrBits(bits);
}
@Override
public DocIdSetIterator iterator() throws IOException {
return new IteratorBasedIterator(sets);
}
/** A disjunction between several {@link Bits} instances with short-circuit logic. */
public static class OrBits implements Bits {
private final Bits[] bits;
public OrBits(Bits[] bits) {
this.bits = bits;
}
@Override
public boolean get(int index) {
for (Bits bit : bits) {
if (bit.get(index)) {
return true;
}
}
return false;
}
@Override
public int length() {
return bits[0].length();
}
}
static class IteratorBasedIterator extends XDocIdSetIterator {
final class Item {
public final DocIdSetIterator iter;
public int doc;
public Item(DocIdSetIterator iter) {
this.iter = iter;
this.doc = -1;
}
}
private int _curDoc;
private final Item[] _heap;
private int _size;
private final long cost;
private final boolean broken;
IteratorBasedIterator(DocIdSet[] sets) throws IOException {
_curDoc = -1;
_heap = new Item[sets.length];
_size = 0;
long cost = 0;
boolean broken = false;
for (DocIdSet set : sets) {
DocIdSetIterator iterator = set.iterator();
broken |= DocIdSets.isBroken(iterator);
if (iterator != null) {
_heap[_size++] = new Item(iterator);
cost += iterator.cost();
}
}
this.cost = cost;
this.broken = broken;
if (_size == 0) _curDoc = DocIdSetIterator.NO_MORE_DOCS;
}
@Override
public boolean isBroken() {
return broken;
}
@Override
public final int docID() {
return _curDoc;
}
@Override
public final int nextDoc() throws IOException {
if (_curDoc == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
Item top = _heap[0];
while (true) {
DocIdSetIterator topIter = top.iter;
int docid;
if ((docid = topIter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
top.doc = docid;
heapAdjust();
} else {
heapRemoveRoot();
if (_size == 0) return (_curDoc = DocIdSetIterator.NO_MORE_DOCS);
}
top = _heap[0];
int topDoc = top.doc;
if (topDoc > _curDoc) {
return (_curDoc = topDoc);
}
}
}
@Override
public final int advance(int target) throws IOException {
if (_curDoc == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
if (target <= _curDoc) target = _curDoc + 1;
Item top = _heap[0];
while (true) {
DocIdSetIterator topIter = top.iter;
int docid;
if ((docid = topIter.advance(target)) != DocIdSetIterator.NO_MORE_DOCS) {
top.doc = docid;
heapAdjust();
} else {
heapRemoveRoot();
if (_size == 0) return (_curDoc = DocIdSetIterator.NO_MORE_DOCS);
}
top = _heap[0];
int topDoc = top.doc;
if (topDoc >= target) {
return (_curDoc = topDoc);
}
}
}
// Organize subScorers into a min heap with scorers generating the earliest document on top.
/*
private final void heapify() {
int size = _size;
for (int i=(size>>1)-1; i>=0; i--)
heapAdjust(i);
}
*/
/* The subtree of subScorers at root is a min heap except possibly for its root element.
* Bubble the root down as required to make the subtree a heap.
*/
private final void heapAdjust() {
final Item[] heap = _heap;
final Item top = heap[0];
final int doc = top.doc;
final int size = _size;
int i = 0;
while (true) {
int lchild = (i << 1) + 1;
if (lchild >= size) break;
Item left = heap[lchild];
int ldoc = left.doc;
int rchild = lchild + 1;
if (rchild < size) {
Item right = heap[rchild];
int rdoc = right.doc;
if (rdoc <= ldoc) {
if (doc <= rdoc) break;
heap[i] = right;
i = rchild;
continue;
}
}
if (doc <= ldoc) break;
heap[i] = left;
i = lchild;
}
heap[i] = top;
}
// Remove the root Scorer from subScorers and re-establish it as a heap
private void heapRemoveRoot() {
_size--;
if (_size > 0) {
Item tmp = _heap[0];
_heap[0] = _heap[_size];
_heap[_size] = tmp; // keep the finished iterator at the end for debugging
heapAdjust();
}
}
@Override
public long cost() {
return cost;
}
}
}

View File

@@ -1,99 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.docset.AndDocIdSet;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import java.io.IOException;
import java.util.List;
/**
*
*/
public class AndFilter extends Filter {
private final List<? extends Filter> filters;
public AndFilter(List<? extends Filter> filters) {
this.filters = filters;
}
public List<? extends Filter> filters() {
return filters;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
if (filters.size() == 1) {
return filters.get(0).getDocIdSet(context, acceptDocs);
}
DocIdSet[] sets = new DocIdSet[filters.size()];
for (int i = 0; i < filters.size(); i++) {
DocIdSet set = filters.get(i).getDocIdSet(context, null);
if (DocIdSets.isEmpty(set)) { // none matching for this filter, we AND, so return EMPTY
return null;
}
sets[i] = set;
}
return BitsFilteredDocIdSet.wrap(new AndDocIdSet(sets), acceptDocs);
}
@Override
public int hashCode() {
int hash = 7;
hash = 31 * hash + (null == filters ? 0 : filters.hashCode());
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if ((obj == null) || (obj.getClass() != this.getClass()))
return false;
AndFilter other = (AndFilter) obj;
return equalFilters(filters, other.filters);
}
@Override
public String toString(String field) {
StringBuilder builder = new StringBuilder();
for (Filter filter : filters) {
if (builder.length() > 0) {
builder.append(' ');
}
builder.append('+');
builder.append(filter);
}
return builder.toString();
}
private boolean equalFilters(List<? extends Filter> filters1, List<? extends Filter> filters2) {
return (filters1 == filters2) || ((filters1 != null) && filters1.equals(filters2));
}
}

View File

@@ -1,66 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.docset.AllDocIdSet;
import java.io.IOException;
/**
* A filter that matches on all docs.
*/
public class MatchAllDocsFilter extends Filter {
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return BitsFilteredDocIdSet.wrap(new AllDocIdSet(context.reader().maxDoc()), acceptDocs);
}
@Override
public int hashCode() {
return this.getClass().hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null) {
return false;
}
if (obj.getClass() == this.getClass()) {
return true;
}
return false;
}
@Override
public String toString(String field) {
return "*:*";
}
}

View File

@@ -1,64 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import java.io.IOException;
/**
* A filter that matches no docs.
*/
public class MatchNoDocsFilter extends Filter {
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return null;
}
@Override
public int hashCode() {
return this.getClass().hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null) {
return false;
}
if (obj.getClass() == this.getClass()) {
return true;
}
return false;
}
@Override
public String toString(String field) {
return "MatchNoDocsFilter";
}
}

View File

@@ -1,78 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.docset.AllDocIdSet;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import org.elasticsearch.common.lucene.docset.NotDocIdSet;
import java.io.IOException;
/**
*
*/
public class NotFilter extends Filter {
private final Filter filter;
public NotFilter(Filter filter) {
this.filter = filter;
}
public Filter filter() {
return filter;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
DocIdSet set = filter.getDocIdSet(context, null);
DocIdSet notSet;
if (DocIdSets.isEmpty(set)) {
notSet = new AllDocIdSet(context.reader().maxDoc());
} else {
notSet = new NotDocIdSet(set, context.reader().maxDoc());
}
return BitsFilteredDocIdSet.wrap(notSet, acceptDocs);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NotFilter notFilter = (NotFilter) o;
return !(filter != null ? !filter.equals(notFilter.filter) : notFilter.filter != null);
}
@Override
public String toString(String field) {
return "NotFilter(" + filter + ")";
}
@Override
public int hashCode() {
return filter != null ? filter.hashCode() : 0;
}
}

View File

@@ -1,108 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import org.elasticsearch.common.lucene.docset.OrDocIdSet;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
*
*/
public class OrFilter extends Filter {
private final List<? extends Filter> filters;
public OrFilter(List<? extends Filter> filters) {
this.filters = filters;
}
public List<? extends Filter> filters() {
return filters;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
if (filters.size() == 1) {
return filters.get(0).getDocIdSet(context, acceptDocs);
}
List<DocIdSet> sets = new ArrayList<>(filters.size());
for (int i = 0; i < filters.size(); i++) {
DocIdSet set = filters.get(i).getDocIdSet(context, null);
if (DocIdSets.isEmpty(set)) { // none matching for this filter, continue
continue;
}
sets.add(set);
}
if (sets.size() == 0) {
return null;
}
DocIdSet set;
if (sets.size() == 1) {
set = sets.get(0);
} else {
set = new OrDocIdSet(sets.toArray(new DocIdSet[sets.size()]));
}
return BitsFilteredDocIdSet.wrap(set, acceptDocs);
}
@Override
public int hashCode() {
int hash = 7;
hash = 31 * hash + (null == filters ? 0 : filters.hashCode());
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if ((obj == null) || (obj.getClass() != this.getClass()))
return false;
OrFilter other = (OrFilter) obj;
return equalFilters(filters, other.filters);
}
@Override
public String toString(String field) {
StringBuilder builder = new StringBuilder();
for (Filter filter : filters) {
if (builder.length() > 0) {
builder.append(' ');
}
builder.append(filter);
}
return builder.toString();
}
private boolean equalFilters(List<? extends Filter> filters1, List<? extends Filter> filters2) {
return (filters1 == filters2) || ((filters1 != null) && filters1.equals(filters2));
}
}

View File

@@ -19,7 +19,15 @@
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.index.query.QueryParseContext;
@@ -33,17 +41,8 @@ import java.util.regex.Pattern;
*/
public class Queries {
/**
* A match all docs filter. Note, requires no caching!.
*/
public final static Filter MATCH_ALL_FILTER = new MatchAllDocsFilter();
public final static Filter MATCH_NO_FILTER = new MatchNoDocsFilter();
public static Query newMatchAllQuery() {
// We don't use MatchAllDocsQuery, its slower than the one below ... (much slower)
// NEVER cache this XConstantScore Query it's not immutable and based on #3521
// some code might set a boost on this query.
return new ConstantScoreQuery(MATCH_ALL_FILTER);
return new MatchAllDocsQuery();
}
/** Return a query that matches no document. */
@@ -51,6 +50,22 @@ public class Queries {
return new BooleanQuery();
}
public static Filter newMatchAllFilter() {
return wrap(newMatchAllQuery());
}
public static Filter newMatchNoDocsFilter() {
return wrap(newMatchNoDocsQuery());
}
/** Return a query that matches all documents but those that match the given query. */
public static Query not(Query q) {
BooleanQuery bq = new BooleanQuery();
bq.add(new MatchAllDocsQuery(), Occur.MUST);
bq.add(q, Occur.MUST_NOT);
return bq;
}
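// Usage sketch (illustrative; the TermQuery, field name and value are hypothetical):
//
//   Query noDeleted = Queries.not(new TermQuery(new Term("status", "deleted")));
//
// yields a BooleanQuery matching every document except those matching the inner query, built exactly
// as above from a MatchAllDocsQuery MUST clause plus the given query as MUST_NOT.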
public static boolean isNegativeQuery(Query q) {
if (!(q instanceof BooleanQuery)) {
return false;
@@ -76,11 +91,12 @@ public class Queries {
public static boolean isConstantMatchAllQuery(Query query) {
if (query instanceof ConstantScoreQuery) {
ConstantScoreQuery scoreQuery = (ConstantScoreQuery) query;
if (scoreQuery.getQuery() instanceof MatchAllDocsFilter || scoreQuery.getQuery() instanceof MatchAllDocsQuery) {
return isConstantMatchAllQuery(((ConstantScoreQuery) query).getQuery());
} else if (query instanceof QueryWrapperFilter) {
return isConstantMatchAllQuery(((QueryWrapperFilter) query).getQuery());
} else if (query instanceof MatchAllDocsQuery) {
return true;
}
}
return false;
}
@@ -151,10 +167,15 @@
*/
@SuppressForbidden(reason = "QueryWrapperFilter cachability")
public static Filter wrap(Query query, QueryParseContext context) {
if (context.requireCustomQueryWrappingFilter() || CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) {
if ((context != null && context.requireCustomQueryWrappingFilter()) || CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) {
return new CustomQueryWrappingFilter(query);
} else {
return new QueryWrapperFilter(query);
}
}
/** Wrap as a {@link Filter}. */
public static Filter wrap(Query query) {
return wrap(query, null);
}
}

View File

@@ -1,110 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MultiTermQueryWrapperFilter;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.RegExp;
import java.io.IOException;
/**
* A lazy regexp filter which only builds the automaton on the first call to {@link #getDocIdSet(LeafReaderContext, Bits)}.
* It is not thread safe (so can't be applied on multiple segments concurrently)
*/
public class RegexpFilter extends Filter {
private final Term term;
private final int flags;
// use delegation here to support efficient implementation of equals & hashcode for this
// filter (as it will be used as the filter cache key)
private final InternalFilter filter;
public RegexpFilter(Term term) {
this(term, RegExp.ALL);
}
public RegexpFilter(Term term, int flags) {
this(term, flags, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
}
public RegexpFilter(Term term, int flags, int maxDeterminizedStates) {
filter = new InternalFilter(term, flags, maxDeterminizedStates);
this.term = term;
this.flags = flags;
}
public String field() {
return term.field();
}
public String regexp() {
return term.text();
}
public int flags() {
return flags;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return filter.getDocIdSet(context, acceptDocs);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
org.elasticsearch.common.lucene.search.RegexpFilter that = (org.elasticsearch.common.lucene.search.RegexpFilter) o;
if (flags != that.flags) return false;
if (term != null ? !term.equals(that.term) : that.term != null) return false;
return true;
}
@Override
public int hashCode() {
int result = term != null ? term.hashCode() : 0;
result = 31 * result + flags;
return result;
}
@Override
public String toString(String field) {
// todo should we also show the flags?
return term.field() + ":" + term.text();
}
static class InternalFilter extends MultiTermQueryWrapperFilter<RegexpQuery> {
public InternalFilter(Term term, int flags, int maxDeterminizedStates) {
super(new RegexpQuery(term, flags, maxDeterminizedStates));
}
}
}

View File

@@ -1,377 +0,0 @@
package org.elasticsearch.common.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.FilterClause;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.lucene.docset.AllDocIdSet;
import org.elasticsearch.common.lucene.docset.AndDocIdSet;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import org.elasticsearch.common.lucene.docset.NotDocIdSet;
import org.elasticsearch.common.lucene.docset.OrDocIdSet.OrBits;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
/**
* Similar to {@link org.apache.lucene.queries.BooleanFilter}.
* <p/>
* Our own variant mainly differs by the fact that we pass the acceptDocs down to the filters
* and don't filter based on them at the end. Our logic is a bit different, and we filter based on that
* at the top level filter chain.
*/
public class XBooleanFilter extends Filter implements Iterable<FilterClause> {
private static final Comparator<DocIdSetIterator> COST_DESCENDING = new Comparator<DocIdSetIterator>() {
@Override
public int compare(DocIdSetIterator o1, DocIdSetIterator o2) {
return Long.compare(o2.cost(), o1.cost());
}
};
private static final Comparator<DocIdSetIterator> COST_ASCENDING = new Comparator<DocIdSetIterator>() {
@Override
public int compare(DocIdSetIterator o1, DocIdSetIterator o2) {
return Long.compare(o1.cost(), o2.cost());
}
};
final List<FilterClause> clauses = new ArrayList<>();
/**
* Returns a DocIdSetIterator representing the Boolean composition
* of the filters that have been added.
*/
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
final int maxDoc = context.reader().maxDoc();
// the 0-clauses case is ambiguous because an empty OR filter should return nothing
// while an empty AND filter should return all docs, so we handle this case explicitly
if (clauses.isEmpty()) {
return null;
}
// optimize single case...
if (clauses.size() == 1) {
FilterClause clause = clauses.get(0);
DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
if (clause.getOccur() == Occur.MUST_NOT) {
if (DocIdSets.isEmpty(set)) {
return new AllDocIdSet(maxDoc);
} else {
return new NotDocIdSet(set, maxDoc);
}
}
// SHOULD or MUST, just return the set...
if (DocIdSets.isEmpty(set)) {
return null;
}
return set;
}
// We have several clauses, try to organize things to make it easier to process
List<DocIdSetIterator> shouldIterators = new ArrayList<>();
List<Bits> shouldBits = new ArrayList<>();
boolean hasShouldClauses = false;
List<DocIdSetIterator> requiredIterators = new ArrayList<>();
List<DocIdSetIterator> excludedIterators = new ArrayList<>();
List<Bits> requiredBits = new ArrayList<>();
List<Bits> excludedBits = new ArrayList<>();
for (FilterClause clause : clauses) {
DocIdSet set = clause.getFilter().getDocIdSet(context, null);
DocIdSetIterator it = null;
Bits bits = null;
if (DocIdSets.isEmpty(set) == false) {
it = set.iterator();
if (it != null) {
bits = set.bits();
}
}
switch (clause.getOccur()) {
case SHOULD:
hasShouldClauses = true;
if (it == null) {
// continue, but we recorded that there is at least one should clause
// so that if all iterators are null we know that nothing matches this
// filter since at least one SHOULD clause needs to match
} else if (bits != null && DocIdSets.isBroken(it)) {
shouldBits.add(bits);
} else {
shouldIterators.add(it);
}
break;
case MUST:
if (it == null) {
// no documents matched a clause that is compulsory, then nothing matches at all
return null;
} else if (bits != null && DocIdSets.isBroken(it)) {
requiredBits.add(bits);
} else {
requiredIterators.add(it);
}
break;
case MUST_NOT:
if (it == null) {
// ignore
} else if (bits != null && DocIdSets.isBroken(it)) {
excludedBits.add(bits);
} else {
excludedIterators.add(it);
}
break;
default:
throw new AssertionError();
}
}
// Since BooleanFilter requires that at least one SHOULD clause matches,
// transform the SHOULD clauses into a MUST clause
if (hasShouldClauses) {
if (shouldIterators.isEmpty() && shouldBits.isEmpty()) {
// we had should clauses, but they all produced empty sets
// yet BooleanFilter requires that at least one clause matches
// so it means we do not match anything
return null;
} else if (shouldIterators.size() == 1 && shouldBits.isEmpty()) {
requiredIterators.add(shouldIterators.get(0));
} else {
// apply high-cardinality should clauses first
CollectionUtil.timSort(shouldIterators, COST_DESCENDING);
BitDocIdSet.Builder shouldBuilder = null;
for (DocIdSetIterator it : shouldIterators) {
if (shouldBuilder == null) {
shouldBuilder = new BitDocIdSet.Builder(maxDoc);
}
shouldBuilder.or(it);
}
if (shouldBuilder != null && shouldBits.isEmpty() == false) {
// we have both iterators and bits, there is no way to compute
// the union efficiently, so we just transform the iterators into
// bits
// add first since these are fast bits
shouldBits.add(0, shouldBuilder.build().bits());
shouldBuilder = null;
}
if (shouldBuilder == null) {
// only bits
assert shouldBits.size() >= 1;
if (shouldBits.size() == 1) {
requiredBits.add(shouldBits.get(0));
} else {
requiredBits.add(new OrBits(shouldBits.toArray(new Bits[shouldBits.size()])));
}
} else {
assert shouldBits.isEmpty();
// only iterators, we can add the merged iterator to the list of required iterators
requiredIterators.add(shouldBuilder.build().iterator());
}
}
} else {
assert shouldIterators.isEmpty();
assert shouldBits.isEmpty();
}
// From now on, we don't have to care about SHOULD clauses anymore since we upgraded
// them to required clauses (if necessary)
// cheap iterators first to make intersection faster
CollectionUtil.timSort(requiredIterators, COST_ASCENDING);
CollectionUtil.timSort(excludedIterators, COST_ASCENDING);
// Intersect iterators
BitDocIdSet.Builder res = null;
for (DocIdSetIterator iterator : requiredIterators) {
if (res == null) {
res = new BitDocIdSet.Builder(maxDoc);
res.or(iterator);
} else {
res.and(iterator);
}
}
for (DocIdSetIterator iterator : excludedIterators) {
if (res == null) {
res = new BitDocIdSet.Builder(maxDoc, true);
}
res.andNot(iterator);
}
// Transform the excluded bits into required bits
if (excludedBits.isEmpty() == false) {
Bits excluded;
if (excludedBits.size() == 1) {
excluded = excludedBits.get(0);
} else {
excluded = new OrBits(excludedBits.toArray(new Bits[excludedBits.size()]));
}
requiredBits.add(new NotDocIdSet.NotBits(excluded));
}
// The only thing left to do is to intersect 'res' with 'requiredBits'
// the main doc id set that will drive iteration
DocIdSet main;
if (res == null) {
main = new AllDocIdSet(maxDoc);
} else {
main = res.build();
}
// apply accepted docs and compute the bits to filter with
// accepted docs are added first since they are fast and will help not computing anything on deleted docs
if (acceptDocs != null) {
requiredBits.add(0, acceptDocs);
}
// the random-access filter that we will apply to 'main'
Bits filter;
if (requiredBits.isEmpty()) {
filter = null;
} else if (requiredBits.size() == 1) {
filter = requiredBits.get(0);
} else {
filter = new AndDocIdSet.AndBits(requiredBits.toArray(new Bits[requiredBits.size()]));
}
return BitsFilteredDocIdSet.wrap(main, filter);
}
/**
* Adds a new FilterClause to the Boolean Filter container
*
* @param filterClause A FilterClause object containing a Filter and an Occur parameter
*/
public void add(FilterClause filterClause) {
clauses.add(filterClause);
}
public final void add(Filter filter, Occur occur) {
add(new FilterClause(filter, occur));
}
/**
* Returns the list of clauses
*/
public List<FilterClause> clauses() {
return clauses;
}
/**
* Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
* make it possible to do:
* <pre class="prettyprint">for (FilterClause clause : booleanFilter) {}</pre>
*/
@Override
public final Iterator<FilterClause> iterator() {
return clauses().iterator();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if ((obj == null) || (obj.getClass() != this.getClass())) {
return false;
}
final XBooleanFilter other = (XBooleanFilter) obj;
return clauses.equals(other.clauses);
}
@Override
public int hashCode() {
return 657153718 ^ clauses.hashCode();
}
/**
* Prints a user-readable version of this Filter.
*/
@Override
public String toString(String field) {
final StringBuilder buffer = new StringBuilder("BooleanFilter(");
final int minLen = buffer.length();
for (final FilterClause c : clauses) {
if (buffer.length() > minLen) {
buffer.append(' ');
}
buffer.append(c);
}
return buffer.append(')').toString();
}
static class ResultClause {
public final DocIdSet docIdSet;
public final Bits bits;
public final FilterClause clause;
DocIdSetIterator docIdSetIterator;
ResultClause(DocIdSet docIdSet, Bits bits, FilterClause clause) {
this.docIdSet = docIdSet;
this.bits = bits;
this.clause = clause;
}
/**
* @return An iterator, but caches it for subsequent usage. Don't use if iterator is consumed in one invocation.
*/
DocIdSetIterator iterator() throws IOException {
if (docIdSetIterator != null) {
return docIdSetIterator;
} else {
return docIdSetIterator = docIdSet.iterator();
}
}
}
static boolean iteratorMatch(DocIdSetIterator docIdSetIterator, int target) throws IOException {
assert docIdSetIterator != null;
int current = docIdSetIterator.docID();
if (current == DocIdSetIterator.NO_MORE_DOCS || target < current) {
return false;
} else {
if (current == target) {
return true;
} else {
return docIdSetIterator.advance(target) == target;
}
}
}
}

View File

@@ -1,40 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.search.DocIdSetIterator;
import org.elasticsearch.common.lucene.docset.DocIdSets;
/**
* Extension of {@link DocIdSetIterator} that allows to know if iteration is
* implemented efficiently.
*/
public abstract class XDocIdSetIterator extends DocIdSetIterator {
/**
* Return <tt>true</tt> if this iterator cannot both
* {@link DocIdSetIterator#nextDoc} and {@link DocIdSetIterator#advance}
* in sub-linear time.
*
* Do not call this method directly, use {@link DocIdSets#isBroken}.
*/
public abstract boolean isBroken();
}

View File

@@ -0,0 +1,375 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util;
import com.google.common.base.Charsets;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.*;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.*;
/**
*/
public class MultiDataPathUpgrader {
private final NodeEnvironment nodeEnvironment;
private final ESLogger logger = Loggers.getLogger(getClass());
/**
* Creates a new upgrader instance
* @param nodeEnvironment the node env to operate on.
*
*/
public MultiDataPathUpgrader(NodeEnvironment nodeEnvironment) {
this.nodeEnvironment = nodeEnvironment;
}
/**
* Upgrades the given shard Id from multiple shard paths into the given target path.
*
* @see #pickShardPath(org.elasticsearch.index.shard.ShardId)
*/
public void upgrade(ShardId shard, ShardPath targetPath) throws IOException {
final Path[] paths = nodeEnvironment.availableShardPaths(shard); // custom data path doesn't need upgrading
if (isTargetPathConfigured(paths, targetPath) == false) {
throw new IllegalArgumentException("shard path must be one of the shards data paths");
}
assert needsUpgrading(shard) : "Should not upgrade a path that needs no upgrading";
logger.info("{} upgrading multi data dir to {}", shard, targetPath.getDataPath());
final ShardStateMetaData loaded = ShardStateMetaData.FORMAT.loadLatestState(logger, paths);
if (loaded == null) {
throw new IllegalStateException(shard + " no shard state found in any of: " + Arrays.toString(paths) + " please check and remove them if possible");
}
logger.info("{} loaded shard state {}", shard, loaded);
ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath());
Files.createDirectories(targetPath.resolveIndex());
try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) {
try (final Lock lock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) {
if (lock.obtain(5000)) {
upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
} else {
throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex());
}
}
}
upgradeFiles(shard, targetPath, targetPath.resolveTranslog(), ShardPath.TRANSLOG_FOLDER_NAME, paths);
logger.info("{} wipe upgraded directories", shard);
for (Path path : paths) {
if (path.equals(targetPath.getShardStatePath()) == false) {
logger.info("{} wipe shard directories: [{}]", shard, path);
IOUtils.rm(path);
}
}
if (FileSystemUtils.files(targetPath.resolveIndex()).length == 0) {
throw new IllegalStateException("index folder [" + targetPath.resolveIndex() + "] is empty");
}
if (FileSystemUtils.files(targetPath.resolveTranslog()).length == 0) {
throw new IllegalStateException("translog folder [" + targetPath.resolveTranslog() + "] is empty");
}
}
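// Typical call sequence (an illustrative sketch only, not part of the original file), assuming a
// NodeEnvironment with multiple data paths and a shard that is not currently in use:
//
//   MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnvironment);
//   if (upgrader.needsUpgrading(shardId)) {
//       ShardPath target = upgrader.pickShardPath(shardId);
//       upgrader.upgrade(shardId, target);
//       upgrader.checkIndex(target);
//   }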
/**
* Runs check-index on the target shard and throws an exception if it failed
*/
public void checkIndex(ShardPath targetPath) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
try (Directory directory = new SimpleFSDirectory(targetPath.resolveIndex());
final CheckIndex checkIndex = new CheckIndex(directory)) {
checkIndex.setInfoStream(out);
CheckIndex.Status status = checkIndex.checkIndex();
out.flush();
if (!status.clean) {
logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
throw new ElasticsearchIllegalStateException("index check failure");
}
}
}
/**
* Returns true iff the given shard needs upgrading.
*/
public boolean needsUpgrading(ShardId shard) {
final Path[] paths = nodeEnvironment.availableShardPaths(shard);
// custom data paths don't need upgrading, and neither do single-path environments
if (paths.length > 1) {
int numPathsExist = 0;
for (Path path : paths) {
if (Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) {
numPathsExist++;
if (numPathsExist > 1) {
return true;
}
}
}
}
return false;
}
/**
* Picks a target ShardPath to allocate and upgrade the given shard to. It picks the target based on a simple
* heuristic:
* <ul>
* <li>if the smallest datapath has at least 2x the shard's total size available, the datapath with the most bytes for that shard is picked to minimize the amount of bytes to copy</li>
* <li>otherwise the datapath with the most usable space is used as the target, no matter how big a slice of the shard it already holds.</li>
* </ul>
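* <p>
* Worked example (illustrative numbers, not part of the original documentation): suppose a 4gb shard is
* spread over two paths, path A holding 1gb with 20gb usable space and path B holding 3gb with 5gb usable.
* The smallest combined (usable + already used) space is 3gb + 5gb = 8gb, which is at least 2 * 4gb, so the
* path already holding the most shard data (B) is picked and only 1gb needs to be copied. If B instead had
* only 2gb usable, that threshold would not be met and the path with the most usable space (A) would be
* picked, even though more data has to be copied.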
*/
public ShardPath pickShardPath(ShardId shard) throws IOException {
if (needsUpgrading(shard) == false) {
throw new IllegalStateException("Shard doesn't need upgrading");
}
final NodeEnvironment.NodePath[] paths = nodeEnvironment.nodePaths();
// if we need upgrading make sure we have all paths.
for (NodeEnvironment.NodePath path : paths) {
Files.createDirectories(path.resolve(shard));
}
final ShardFileInfo[] shardFileInfo = getShardFileInfo(shard, paths);
long totalBytesUsedByShard = 0;
long leastUsableSpace = Long.MAX_VALUE;
long mostUsableSpace = Long.MIN_VALUE;
assert shardFileInfo.length == nodeEnvironment.availableShardPaths(shard).length;
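// note: a path's usable space is counted as if the shard files it already holds were reclaimed
// (usableSpace + spaceUsedByShard), since those bytes would not have to be copied to that path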
for (ShardFileInfo info : shardFileInfo) {
totalBytesUsedByShard += info.spaceUsedByShard;
leastUsableSpace = Math.min(leastUsableSpace, info.usableSpace + info.spaceUsedByShard);
mostUsableSpace = Math.max(mostUsableSpace, info.usableSpace + info.spaceUsedByShard);
}
if (mostUsableSpace < totalBytesUsedByShard) {
throw new IllegalStateException("Can't upgrade path available space: " + new ByteSizeValue(mostUsableSpace) + " required space: " + new ByteSizeValue(totalBytesUsedByShard));
}
ShardFileInfo target = shardFileInfo[0];
if (leastUsableSpace >= (2 * totalBytesUsedByShard)) {
for (ShardFileInfo info : shardFileInfo) {
if (info.spaceUsedByShard > target.spaceUsedByShard) {
target = info;
}
}
} else {
for (ShardFileInfo info : shardFileInfo) {
if (info.usableSpace > target.usableSpace) {
target = info;
}
}
}
return new ShardPath(target.path, target.path, IndexMetaData.INDEX_UUID_NA_VALUE /* we don't know */, shard);
}
private ShardFileInfo[] getShardFileInfo(ShardId shard, NodeEnvironment.NodePath[] paths) throws IOException {
final ShardFileInfo[] info = new ShardFileInfo[paths.length];
for (int i = 0; i < info.length; i++) {
Path path = paths[i].resolve(shard);
final long usableSpace = getUsableSpace(paths[i]);
info[i] = new ShardFileInfo(path, usableSpace, getSpaceUsedByShard(path));
}
return info;
}
protected long getSpaceUsedByShard(Path path) throws IOException {
final long[] spaceUsedByShard = new long[] {0};
if (Files.exists(path)) {
Files.walkFileTree(path, new FileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
spaceUsedByShard[0] += attrs.size();
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
});
}
return spaceUsedByShard[0];
}
protected long getUsableSpace(NodeEnvironment.NodePath path) throws IOException {
FileStore fileStore = path.fileStore;
return fileStore.getUsableSpace();
}
static class ShardFileInfo {
final Path path;
final long usableSpace;
final long spaceUsedByShard;
ShardFileInfo(Path path, long usableSpace, long spaceUsedByShard) {
this.path = path;
this.usableSpace = usableSpace;
this.spaceUsedByShard = spaceUsedByShard;
}
}
private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName, Path[] paths) throws IOException {
List<Path> movedFiles = new ArrayList<>();
for (Path path : paths) {
if (path.equals(targetPath.getDataPath()) == false) {
final Path sourceDir = path.resolve(folderName);
if (Files.exists(sourceDir)) {
logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
Files.createDirectories(targetDir);
for (Path file : stream) {
if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString()) || Files.isDirectory(file)) {
continue; // skip the write.lock file and nested directories
}
logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(), Files.size(file));
final Path targetFile = targetDir.resolve(file.getFileName());
/* We are pessimistic: copy the file to the target path first and then do an atomic move to rename it, such that
in the worst case the file exists twice but is never lost or half written. */
final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_", "_" + file.getFileName().toString());
Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING);
Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE); // we are on the same FS - this must work otherwise all bets are off
Files.delete(file);
movedFiles.add(targetFile);
}
}
}
}
}
if (movedFiles.isEmpty() == false) {
// fsync the moved files - some of them might already be on disk but we make sure they are durable
logger.info("{} fsync files", shard);
for (Path moved : movedFiles) {
logger.info("{} syncing [{}]", shard, moved.getFileName());
IOUtils.fsync(moved, false);
}
logger.info("{} syncing directory [{}]", shard, targetDir);
IOUtils.fsync(targetDir, true);
}
}
/**
* Returns <code>true</code> iff the target path is one of the given paths.
*/
private boolean isTargetPathConfigured(final Path[] paths, ShardPath targetPath) {
for (Path path : paths) {
if (path.equals(targetPath.getDataPath())) {
return true;
}
}
return false;
}
/**
* Runs an upgrade on all shards located under the given node environment if more than one data.path is configured;
* otherwise this method returns immediately.
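* <p>
* Illustrative usage (assuming a {@link NodeEnvironment} and an {@link ESLogger} are already at hand):
* </p>
* <pre>{@code
* // hypothetical call site during node startup, once the environment has been created:
* MultiDataPathUpgrader.upgradeMultiDataPath(nodeEnvironment, logger);
* }</pre>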
*/
public static void upgradeMultiDataPath(NodeEnvironment nodeEnv, ESLogger logger) throws IOException {
if (nodeEnv.nodeDataPaths().length > 1) {
final MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnv);
final Set<String> allIndices = nodeEnv.findAllIndices();
for (String index : allIndices) {
for (ShardId shardId : findAllShardIds(nodeEnv.indexPaths(new Index(index)))) {
try (ShardLock lock = nodeEnv.shardLock(shardId, 0)) {
if (upgrader.needsUpgrading(shardId)) {
final ShardPath shardPath = upgrader.pickShardPath(shardId);
upgrader.upgrade(shardId, shardPath);
// we have to check if the index path exists since we might
// have only upgraded the shard state written under /indexname/shardid/_state,
// in case the index data lives in a dedicated (custom) data directory
if (Files.exists(shardPath.resolveIndex())) {
upgrader.checkIndex(shardPath);
}
} else {
logger.debug("{} no upgrade needed - already upgraded");
}
}
}
}
}
}
private static Set<ShardId> findAllShardIds(Path... locations) throws IOException {
final Set<ShardId> shardIds = Sets.newHashSet();
for (final Path location : locations) {
if (Files.isDirectory(location)) {
shardIds.addAll(findAllShardsForIndex(location));
}
}
return shardIds;
}
private static Set<ShardId> findAllShardsForIndex(Path indexPath) throws IOException {
Set<ShardId> shardIds = new HashSet<>();
if (Files.isDirectory(indexPath)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
String currentIndex = indexPath.getFileName().toString();
for (Path shardPath : stream) {
if (Files.isDirectory(shardPath)) {
Integer shardId = Ints.tryParse(shardPath.getFileName().toString());
if (shardId != null) {
ShardId id = new ShardId(currentIndex, shardId);
shardIds.add(id);
}
}
}
}
}
return shardIds;
}
}
